From 69b21d6e6104761efec08a40545cdcbdc51c5559 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Mon, 14 Nov 2022 12:27:01 +0000 Subject: [PATCH] Revert "Merge master" --- .cargo/config.toml | 7 +- .github/workflows/auto-label-issues.yml | 4 +- .github/workflows/auto-label-prs.yml | 4 +- .github/workflows/md-link-check.yml | 2 +- .github/workflows/monthly-tag.yml | 2 +- .github/workflows/release-tagging.yml | 2 +- .gitlab-ci.yml | 73 +- .maintain/frame-weight-template.hbs | 44 +- Cargo.lock | 2311 ++++++++-------- Cargo.toml | 15 +- README.md | 1 - bin/node-template/README.md | 32 +- bin/node-template/docker-compose.yml | 2 +- bin/node-template/docs/rust-setup.md | 6 +- bin/node-template/node/Cargo.toml | 11 +- bin/node-template/node/src/benchmarking.rs | 2 +- bin/node-template/node/src/cli.rs | 6 +- bin/node-template/node/src/command.rs | 16 +- bin/node-template/node/src/service.rs | 22 +- bin/node-template/pallets/template/Cargo.toml | 2 +- bin/node-template/pallets/template/src/lib.rs | 28 +- .../pallets/template/src/mock.rs | 12 +- .../pallets/template/src/tests.rs | 7 +- bin/node-template/runtime/Cargo.toml | 11 +- bin/node-template/runtime/src/lib.rs | 122 +- bin/node/bench/Cargo.toml | 16 +- bin/node/bench/src/core.rs | 2 +- bin/node/bench/src/generator.rs | 11 +- bin/node/bench/src/import.rs | 9 +- bin/node/bench/src/main.rs | 10 +- bin/node/bench/src/tempdb.rs | 13 +- bin/node/bench/src/trie.rs | 7 +- bin/node/cli/Cargo.toml | 23 +- bin/node/cli/benches/block_production.rs | 9 +- bin/node/cli/benches/transaction_pool.rs | 7 +- bin/node/cli/build.rs | 2 +- bin/node/cli/src/chain_spec.rs | 56 +- bin/node/cli/src/cli.rs | 10 +- bin/node/cli/src/command.rs | 49 +- bin/node/cli/src/service.rs | 42 +- bin/node/cli/tests/benchmark_storage_works.rs | 1 - bin/node/cli/tests/common.rs | 8 +- bin/node/executor/Cargo.toml | 1 + bin/node/executor/benches/bench.rs | 52 +- bin/node/executor/tests/basic.rs | 318 ++- bin/node/executor/tests/common.rs | 51 +- 
bin/node/executor/tests/fees.rs | 91 +- bin/node/inspect/Cargo.toml | 3 +- bin/node/inspect/src/cli.rs | 4 +- bin/node/inspect/src/lib.rs | 9 +- bin/node/primitives/Cargo.toml | 1 - bin/node/rpc/Cargo.toml | 3 +- bin/node/rpc/src/lib.rs | 20 +- bin/node/runtime/Cargo.toml | 52 +- bin/node/runtime/src/impls.rs | 74 +- bin/node/runtime/src/lib.rs | 531 ++-- bin/node/runtime/src/voter_bags.rs | 9 +- bin/node/testing/src/bench.rs | 16 +- bin/utils/chain-spec-builder/Cargo.toml | 2 +- bin/utils/chain-spec-builder/src/main.rs | 22 +- bin/utils/subkey/Cargo.toml | 2 +- bin/utils/subkey/src/lib.rs | 2 +- client/allocator/src/freeing_bump.rs | 4 +- client/api/Cargo.toml | 2 +- client/api/src/backend.rs | 84 +- client/api/src/call_executor.rs | 15 +- client/api/src/client.rs | 13 +- client/api/src/execution_extensions.rs | 4 +- client/api/src/in_mem.rs | 103 +- client/api/src/leaves.rs | 260 +- client/api/src/proof_provider.rs | 16 +- client/authority-discovery/Cargo.toml | 10 +- client/authority-discovery/src/error.rs | 2 +- client/authority-discovery/src/lib.rs | 16 +- client/authority-discovery/src/service.rs | 2 +- client/authority-discovery/src/tests.rs | 7 +- client/authority-discovery/src/worker.rs | 120 +- .../src/worker/addr_cache.rs | 7 +- .../src/worker/schema/tests.rs | 5 +- .../authority-discovery/src/worker/tests.rs | 83 +- client/basic-authorship/Cargo.toml | 2 +- .../basic-authorship/src/basic_authorship.rs | 23 +- client/beefy/Cargo.toml | 7 +- client/beefy/rpc/Cargo.toml | 4 +- client/beefy/rpc/src/lib.rs | 58 +- client/beefy/rpc/src/notification.rs | 12 +- client/beefy/src/communication/mod.rs | 113 - client/beefy/src/communication/peers.rs | 131 - .../incoming_requests_handler.rs | 198 -- .../src/communication/request_response/mod.rs | 101 - .../outgoing_requests_engine.rs | 241 -- .../beefy/src/{communication => }/gossip.rs | 28 +- client/beefy/src/import.rs | 95 +- client/beefy/src/justification.rs | 55 +- client/beefy/src/keystore.rs | 9 +- 
client/beefy/src/lib.rs | 155 +- .../src/{communication => }/notification.rs | 21 +- client/beefy/src/round.rs | 48 +- client/beefy/src/tests.rs | 488 ++-- client/beefy/src/worker.rs | 679 ++--- client/block-builder/src/lib.rs | 5 +- client/chain-spec/Cargo.toml | 4 +- client/chain-spec/src/chain_spec.rs | 2 +- client/chain-spec/src/lib.rs | 2 +- client/cli/Cargo.toml | 15 +- client/cli/src/arg_enums.rs | 120 +- client/cli/src/commands/build_spec_cmd.rs | 4 +- client/cli/src/commands/chain_info_cmd.rs | 5 +- client/cli/src/commands/check_block_cmd.rs | 4 +- client/cli/src/commands/export_blocks_cmd.rs | 8 +- client/cli/src/commands/export_state_cmd.rs | 12 +- client/cli/src/commands/generate.rs | 4 +- client/cli/src/commands/generate_node_key.rs | 10 +- client/cli/src/commands/import_blocks_cmd.rs | 6 +- client/cli/src/commands/insert_key.rs | 10 +- client/cli/src/commands/inspect_key.rs | 14 +- client/cli/src/commands/inspect_node_key.rs | 11 +- client/cli/src/commands/purge_chain_cmd.rs | 2 +- client/cli/src/commands/revert_cmd.rs | 2 +- client/cli/src/commands/run_cmd.rs | 86 +- client/cli/src/commands/sign.rs | 10 +- client/cli/src/commands/utils.rs | 17 +- client/cli/src/commands/vanity.rs | 8 +- client/cli/src/commands/verify.rs | 10 +- client/cli/src/config.rs | 42 +- client/cli/src/error.rs | 10 +- client/cli/src/lib.rs | 18 +- client/cli/src/params/database_params.rs | 10 +- client/cli/src/params/import_params.rs | 74 +- client/cli/src/params/keystore_params.rs | 22 +- client/cli/src/params/mod.rs | 22 +- client/cli/src/params/network_params.rs | 44 +- client/cli/src/params/node_key_params.rs | 10 +- .../cli/src/params/offchain_worker_params.rs | 10 +- client/cli/src/params/pruning_params.rs | 45 +- client/cli/src/params/shared_params.rs | 27 +- .../cli/src/params/transaction_pool_params.rs | 6 +- client/consensus/aura/Cargo.toml | 4 +- client/consensus/aura/src/import_queue.rs | 130 +- client/consensus/aura/src/lib.rs | 179 +- 
client/consensus/babe/Cargo.toml | 15 +- client/consensus/babe/rpc/Cargo.toml | 2 +- client/consensus/babe/rpc/src/lib.rs | 18 +- client/consensus/babe/src/authorship.rs | 4 +- client/consensus/babe/src/aux_schema.rs | 10 +- client/consensus/babe/src/lib.rs | 224 +- client/consensus/babe/src/migration.rs | 4 +- client/consensus/babe/src/tests.rs | 188 +- client/consensus/common/Cargo.toml | 6 +- client/consensus/common/src/block_import.rs | 27 +- client/consensus/common/src/import_queue.rs | 20 +- .../common/src/import_queue/basic_queue.rs | 12 +- .../common/src/import_queue/buffered_link.rs | 6 +- client/consensus/epochs/src/lib.rs | 113 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/consensus/manual-seal/src/consensus.rs | 2 +- .../manual-seal/src/consensus/babe.rs | 31 +- .../manual-seal/src/consensus/timestamp.rs | 6 +- .../manual-seal/src/finalize_block.rs | 4 +- client/consensus/manual-seal/src/lib.rs | 88 +- client/consensus/pow/Cargo.toml | 4 +- client/consensus/pow/src/lib.rs | 80 +- client/consensus/pow/src/worker.rs | 6 +- client/consensus/slots/Cargo.toml | 3 +- client/consensus/slots/src/lib.rs | 120 +- client/consensus/slots/src/slots.rs | 14 +- client/db/Cargo.toml | 24 +- client/db/benches/state_access.rs | 311 --- client/db/src/bench.rs | 109 +- client/db/src/lib.rs | 1084 +++----- client/db/src/record_stats_state.rs | 230 -- client/db/src/storage_cache.rs | 6 +- client/db/src/upgrade.rs | 9 +- client/executor/Cargo.toml | 12 +- client/executor/common/Cargo.toml | 5 +- client/executor/common/src/error.rs | 4 + .../common/src/sandbox/wasmi_backend.rs | 17 +- client/executor/runtime-test/Cargo.toml | 2 + client/executor/runtime-test/src/lib.rs | 528 ++-- client/executor/src/integration_tests/mod.rs | 79 +- client/executor/src/native_executor.rs | 285 +- client/executor/src/wasm_runtime.rs | 11 +- client/executor/wasmi/Cargo.toml | 2 +- client/executor/wasmi/src/lib.rs | 2 - client/executor/wasmtime/Cargo.toml | 16 +- 
client/executor/wasmtime/src/host.rs | 3 +- .../executor/wasmtime/src/instance_wrapper.rs | 26 +- client/executor/wasmtime/src/runtime.rs | 28 +- client/executor/wasmtime/src/tests.rs | 81 +- client/finality-grandpa/Cargo.toml | 14 +- client/finality-grandpa/src/aux_schema.rs | 10 +- .../src/communication/gossip.rs | 206 +- .../finality-grandpa/src/communication/mod.rs | 88 +- .../src/communication/periodic.rs | 17 +- .../src/communication/tests.rs | 171 +- client/finality-grandpa/src/environment.rs | 64 +- client/finality-grandpa/src/finality_proof.rs | 18 +- client/finality-grandpa/src/import.rs | 12 +- client/finality-grandpa/src/justification.rs | 61 +- client/finality-grandpa/src/lib.rs | 67 +- client/finality-grandpa/src/tests.rs | 100 +- client/finality-grandpa/src/until_imported.rs | 25 +- client/finality-grandpa/src/voting_rule.rs | 2 +- client/finality-grandpa/src/warp_proof.rs | 9 +- client/informant/Cargo.toml | 4 +- client/informant/src/display.rs | 71 +- client/informant/src/lib.rs | 7 +- client/keystore/Cargo.toml | 8 +- client/keystore/src/local.rs | 8 +- client/network-gossip/Cargo.toml | 7 +- client/network-gossip/src/bridge.rs | 159 +- client/network-gossip/src/lib.rs | 82 +- client/network-gossip/src/state_machine.rs | 138 +- client/network-gossip/src/validator.rs | 3 +- client/network/Cargo.toml | 20 +- client/network/bitswap/Cargo.toml | 40 - client/network/bitswap/src/lib.rs | 524 ---- client/network/{bitswap => }/build.rs | 0 client/network/common/Cargo.toml | 12 +- client/network/common/src/config.rs | 293 +- client/network/common/src/lib.rs | 10 - client/network/common/src/protocol.rs | 147 -- client/network/common/src/protocol/role.rs | 121 - .../network/common/src/request_responses.rs | 47 +- client/network/common/src/service.rs | 631 ----- client/network/common/src/sync.rs | 41 +- client/network/common/src/sync/message.rs | 30 +- client/network/common/src/sync/warp.rs | 3 - client/network/light/Cargo.toml | 7 +- 
.../light/src/light_client_requests.rs | 24 +- .../src/light_client_requests/handler.rs | 122 +- .../network/light/src/schema/light.v1.proto | 51 +- client/network/src/behaviour.rs | 475 ++-- client/network/src/bitswap.rs | 340 +++ client/network/src/config.rs | 386 ++- client/network/src/discovery.rs | 898 ++++--- client/network/{common => }/src/error.rs | 6 +- client/network/src/lib.rs | 77 +- client/network/src/peer_info.rs | 9 +- client/network/src/protocol.rs | 238 +- .../{common => }/src/protocol/event.rs | 38 +- client/network/src/protocol/message.rs | 70 +- .../src/protocol/notifications/behaviour.rs | 9 +- .../src/protocol/notifications/handler.rs | 9 +- .../notifications/upgrade/notifications.rs | 65 +- client/network/src/request_responses.rs | 76 +- client/network/{bitswap => }/src/schema.rs | 2 +- .../src/schema/bitswap.v1.2.0.proto | 0 client/network/src/service.rs | 1173 ++++---- client/network/src/service/metrics.rs | 35 +- client/network/src/service/out_events.rs | 3 +- .../{common => }/src/service/signature.rs | 5 +- .../service/{tests/service.rs => tests.rs} | 560 ++-- .../network/src/service/tests/chain_sync.rs | 402 --- client/network/src/service/tests/mod.rs | 323 --- .../src/lib.rs => src/transactions.rs} | 142 +- client/network/{common => }/src/utils.rs | 0 client/network/sync/Cargo.toml | 16 +- .../network/sync/src/block_request_handler.rs | 66 +- client/network/sync/src/blocks.rs | 54 +- client/network/sync/src/extra_requests.rs | 8 +- client/network/sync/src/lib.rs | 385 +-- client/network/sync/src/mock.rs | 122 - client/network/sync/src/schema/api.v1.proto | 3 +- client/network/sync/src/service/chain_sync.rs | 58 - client/network/sync/src/service/mock.rs | 75 - client/network/sync/src/service/mod.rs | 23 - client/network/sync/src/service/network.rs | 128 - client/network/sync/src/state.rs | 27 +- .../network/sync/src/state_request_handler.rs | 52 +- client/network/sync/src/tests.rs | 61 - client/network/sync/src/warp.rs | 104 +- 
.../network/sync/src/warp_request_handler.rs | 31 +- client/network/test/Cargo.toml | 6 +- client/network/test/src/block_import.rs | 2 +- client/network/test/src/lib.rs | 129 +- client/network/test/src/sync.rs | 136 +- client/network/transactions/Cargo.toml | 28 - client/network/transactions/src/config.rs | 98 - client/offchain/Cargo.toml | 8 +- client/offchain/src/api.rs | 96 +- client/offchain/src/lib.rs | 102 +- client/peerset/Cargo.toml | 4 +- client/rpc-api/Cargo.toml | 4 +- client/rpc-api/src/dev/error.rs | 5 - client/rpc-api/src/state/mod.rs | 2 +- client/rpc-api/src/system/helpers.rs | 12 +- client/rpc-servers/Cargo.toml | 2 +- client/rpc-servers/src/lib.rs | 3 +- client/rpc-servers/src/middleware.rs | 18 +- client/rpc-spec-v2/Cargo.toml | 33 - client/rpc-spec-v2/README.md | 5 - client/rpc-spec-v2/src/chain_spec/api.rs | 53 - .../rpc-spec-v2/src/chain_spec/chain_spec.rs | 60 - client/rpc-spec-v2/src/chain_spec/mod.rs | 38 - client/rpc-spec-v2/src/chain_spec/tests.rs | 61 - client/rpc-spec-v2/src/lib.rs | 30 - client/rpc-spec-v2/src/transaction/api.rs | 37 - client/rpc-spec-v2/src/transaction/error.rs | 100 - client/rpc-spec-v2/src/transaction/event.rs | 353 --- client/rpc-spec-v2/src/transaction/mod.rs | 38 - .../src/transaction/transaction.rs | 208 -- client/rpc/Cargo.toml | 5 +- client/rpc/src/chain/tests.rs | 16 +- client/rpc/src/dev/tests.rs | 4 +- client/rpc/src/state/state_full.rs | 44 +- client/rpc/src/system/tests.rs | 9 +- client/service/Cargo.toml | 15 +- client/service/src/builder.rs | 196 +- .../service/src/chain_ops/export_raw_state.rs | 15 +- client/service/src/chain_ops/revert_chain.rs | 7 - client/service/src/client/call_executor.rs | 82 +- client/service/src/client/client.rs | 188 +- client/service/src/config.rs | 58 +- client/service/src/error.rs | 3 +- client/service/src/lib.rs | 28 +- client/service/src/metrics.rs | 8 +- client/service/test/Cargo.toml | 6 +- client/service/test/src/client/mod.rs | 165 +- client/service/test/src/lib.rs | 
40 +- client/state-db/Cargo.toml | 4 +- client/state-db/src/lib.rs | 368 ++- client/state-db/src/noncanonical.rs | 254 +- client/state-db/src/pruning.rs | 852 ++---- client/state-db/src/test.rs | 34 +- client/sync-state-rpc/Cargo.toml | 2 +- client/sysinfo/Cargo.toml | 5 +- client/sysinfo/src/lib.rs | 27 +- client/sysinfo/src/sysinfo.rs | 187 +- client/telemetry/Cargo.toml | 8 +- client/tracing/Cargo.toml | 4 +- client/tracing/src/block/mod.rs | 2 +- client/tracing/src/logging/mod.rs | 9 +- client/transaction-pool/Cargo.toml | 7 +- client/transaction-pool/api/Cargo.toml | 4 - client/transaction-pool/api/src/lib.rs | 66 +- client/transaction-pool/benches/basics.rs | 14 +- client/transaction-pool/src/api.rs | 34 +- .../transaction-pool/src/enactment_state.rs | 607 ----- client/transaction-pool/src/graph/listener.rs | 17 +- client/transaction-pool/src/graph/pool.rs | 32 +- client/transaction-pool/src/graph/rotator.rs | 2 +- client/transaction-pool/src/graph/watcher.rs | 8 +- client/transaction-pool/src/lib.rs | 360 ++- client/transaction-pool/src/tests.rs | 11 +- client/transaction-pool/tests/pool.rs | 673 +---- client/utils/Cargo.toml | 2 +- docs/CHANGELOG.md | 2 +- docs/CODEOWNERS | 19 +- docs/Upgrading-2.0-to-3.0.md | 8 +- frame/alliance/Cargo.toml | 10 +- frame/alliance/README.md | 4 +- frame/alliance/src/benchmarking.rs | 152 +- frame/alliance/src/lib.rs | 319 +-- frame/alliance/src/migration.rs | 79 - frame/alliance/src/mock.rs | 143 +- frame/alliance/src/tests.rs | 428 +-- frame/alliance/src/types.rs | 27 - frame/alliance/src/weights.rs | 636 ++--- frame/assets/Cargo.toml | 2 +- frame/assets/src/benchmarking.rs | 16 +- frame/assets/src/impl_fungibles.rs | 20 - frame/assets/src/lib.rs | 82 +- frame/assets/src/mock.rs | 42 +- frame/assets/src/tests.rs | 632 ++--- frame/assets/src/types.rs | 2 +- frame/assets/src/weights.rs | 421 ++- frame/atomic-swap/src/lib.rs | 13 +- frame/atomic-swap/src/tests.rs | 36 +- frame/aura/src/mock.rs | 22 +- 
frame/authority-discovery/src/lib.rs | 10 +- frame/authorship/src/lib.rs | 12 +- frame/babe/Cargo.toml | 2 +- frame/babe/src/benchmarking.rs | 29 +- frame/babe/src/default_weights.rs | 14 +- frame/babe/src/equivocation.rs | 4 +- frame/babe/src/lib.rs | 9 +- frame/babe/src/mock.rs | 31 +- frame/babe/src/tests.rs | 54 +- frame/bags-list/Cargo.toml | 11 +- frame/bags-list/fuzzer/Cargo.toml | 2 +- frame/bags-list/fuzzer/src/main.rs | 2 +- frame/bags-list/remote-tests/src/lib.rs | 35 +- frame/bags-list/remote-tests/src/migration.rs | 8 +- .../src/{try_state.rs => sanity_check.rs} | 13 +- frame/bags-list/remote-tests/src/snapshot.rs | 14 +- frame/bags-list/src/benchmarks.rs | 18 +- frame/bags-list/src/lib.rs | 33 +- frame/bags-list/src/list/mod.rs | 45 +- frame/bags-list/src/list/tests.rs | 26 +- frame/bags-list/src/migrations.rs | 23 +- frame/bags-list/src/mock.rs | 13 +- frame/bags-list/src/tests.rs | 59 +- frame/bags-list/src/weights.rs | 79 +- frame/balances/Cargo.toml | 2 +- frame/balances/src/benchmarking.rs | 20 +- frame/balances/src/lib.rs | 45 +- frame/balances/src/tests.rs | 309 +-- frame/balances/src/tests_composite.rs | 17 +- frame/balances/src/tests_local.rs | 34 +- frame/balances/src/tests_reentrancy.rs | 39 +- frame/balances/src/weights.rs | 107 +- frame/beefy-mmr/Cargo.toml | 7 +- frame/beefy-mmr/primitives/Cargo.toml | 15 +- frame/beefy-mmr/primitives/src/lib.rs | 272 +- frame/beefy-mmr/src/lib.rs | 35 +- frame/beefy-mmr/src/mock.rs | 11 +- frame/beefy-mmr/src/tests.rs | 62 +- frame/beefy/Cargo.toml | 1 - frame/beefy/src/lib.rs | 3 +- frame/beefy/src/mock.rs | 8 +- frame/benchmarking/Cargo.toml | 9 +- frame/benchmarking/README.md | 4 +- frame/benchmarking/src/analysis.rs | 310 +-- frame/benchmarking/src/baseline.rs | 8 +- frame/benchmarking/src/lib.rs | 102 +- frame/benchmarking/src/tests.rs | 119 +- frame/benchmarking/src/tests_instance.rs | 95 +- frame/benchmarking/src/utils.rs | 22 +- frame/benchmarking/src/weights.rs | 105 +- 
frame/bounties/Cargo.toml | 1 - frame/bounties/src/benchmarking.rs | 47 +- frame/bounties/src/lib.rs | 11 +- frame/bounties/src/migrations/v4.rs | 4 +- frame/bounties/src/tests.rs | 295 +-- frame/bounties/src/weights.rs | 199 +- frame/child-bounties/Cargo.toml | 1 - frame/child-bounties/src/benchmarking.rs | 2 +- frame/child-bounties/src/lib.rs | 7 +- frame/child-bounties/src/tests.rs | 356 ++- frame/child-bounties/src/weights.rs | 131 +- frame/collective/Cargo.toml | 2 +- frame/collective/src/benchmarking.rs | 97 +- frame/collective/src/lib.rs | 93 +- frame/collective/src/migrations/v4.rs | 4 +- frame/collective/src/tests.rs | 481 ++-- frame/collective/src/weights.rs | 373 ++- frame/contracts/Cargo.toml | 11 +- frame/contracts/README.md | 29 +- .../{primitives => common}/Cargo.toml | 9 +- .../{primitives => common}/README.md | 0 .../{primitives => common}/src/lib.rs | 102 +- frame/contracts/fixtures/call_runtime.wat | 4 +- .../fixtures/create_storage_and_call.wat | 55 - .../fixtures/delegate_call_simple.wat | 50 - .../contracts/fixtures/float_instruction.wat | 11 - frame/contracts/proc-macro/src/lib.rs | 452 +--- frame/contracts/rpc/Cargo.toml | 30 + frame/contracts/rpc/README.md | 3 + frame/contracts/rpc/runtime-api/Cargo.toml | 34 + frame/contracts/rpc/runtime-api/README.md | 7 + frame/contracts/rpc/runtime-api/src/lib.rs | 85 + frame/contracts/rpc/src/lib.rs | 524 ++++ frame/contracts/src/benchmarking/code.rs | 14 +- frame/contracts/src/benchmarking/mod.rs | 86 +- frame/contracts/src/chain_extension.rs | 109 +- frame/contracts/src/exec.rs | 411 ++- frame/contracts/src/gas.rs | 79 +- frame/contracts/src/lib.rs | 499 ++-- frame/contracts/src/migration.rs | 301 +-- frame/contracts/src/schedule.rs | 141 +- frame/contracts/src/storage.rs | 71 +- frame/contracts/src/storage/meter.rs | 654 ++--- frame/contracts/src/tests.rs | 1590 +++-------- frame/contracts/src/wasm/code_cache.rs | 35 +- frame/contracts/src/wasm/env_def/macros.rs | 396 +++ 
frame/contracts/src/wasm/env_def/mod.rs | 9 +- frame/contracts/src/wasm/mod.rs | 391 +-- frame/contracts/src/wasm/prepare.rs | 57 +- frame/contracts/src/wasm/runtime.rs | 2062 +++++++-------- frame/contracts/src/weights.rs | 2351 +++++++---------- frame/conviction-voting/Cargo.toml | 5 +- frame/conviction-voting/src/benchmarking.rs | 129 +- frame/conviction-voting/src/lib.rs | 29 +- frame/conviction-voting/src/tests.rs | 360 ++- frame/conviction-voting/src/types.rs | 3 - frame/conviction-voting/src/vote.rs | 2 +- frame/conviction-voting/src/weights.rs | 143 +- frame/democracy/Cargo.toml | 9 +- frame/democracy/src/benchmarking.rs | 420 ++- frame/democracy/src/conviction.rs | 16 +- frame/democracy/src/lib.rs | 776 ++++-- frame/democracy/src/migrations.rs | 236 -- frame/democracy/src/tests.rs | 92 +- frame/democracy/src/tests/cancellation.rs | 37 +- frame/democracy/src/tests/decoders.rs | 40 +- frame/democracy/src/tests/delegation.rs | 84 +- .../democracy/src/tests/external_proposing.rs | 109 +- frame/democracy/src/tests/fast_tracking.rs | 55 +- frame/democracy/src/tests/lock_voting.rs | 194 +- frame/democracy/src/tests/preimage.rs | 219 ++ frame/democracy/src/tests/public_proposals.rs | 74 +- frame/democracy/src/tests/scheduling.rs | 61 +- frame/democracy/src/tests/voting.rs | 49 +- frame/democracy/src/types.rs | 28 +- frame/democracy/src/vote.rs | 48 +- frame/democracy/src/vote_threshold.rs | 6 +- frame/democracy/src/weights.rs | 571 ++-- frame/dex/rpc/src/lib.rs | 2 +- frame/dex/src/lib.rs | 2 +- frame/dex/src/mock.rs | 25 +- frame/dex/src/tests.rs | 59 +- .../election-provider-multi-phase/Cargo.toml | 5 +- .../src/benchmarking.rs | 25 +- .../election-provider-multi-phase/src/lib.rs | 507 +--- .../src/migrations.rs | 78 - .../election-provider-multi-phase/src/mock.rs | 104 +- .../src/signed.rs | 527 +--- .../src/unsigned.rs | 512 +--- .../src/weights.rs | 249 +- frame/election-provider-support/Cargo.toml | 1 - .../benchmarking/Cargo.toml | 2 +- 
.../solution-type/fuzzer/Cargo.toml | 2 +- frame/election-provider-support/src/lib.rs | 165 +- .../election-provider-support/src/onchain.rs | 179 +- .../election-provider-support/src/weights.rs | 32 +- frame/elections-phragmen/Cargo.toml | 1 - frame/elections-phragmen/src/benchmarking.rs | 30 +- frame/elections-phragmen/src/lib.rs | 874 +++--- frame/elections-phragmen/src/migrations/v3.rs | 4 +- frame/elections-phragmen/src/migrations/v4.rs | 4 +- frame/elections-phragmen/src/migrations/v5.rs | 2 +- frame/elections-phragmen/src/weights.rs | 259 +- frame/examples/basic/Cargo.toml | 2 +- frame/examples/basic/src/benchmarking.rs | 21 +- frame/examples/basic/src/lib.rs | 20 +- frame/examples/basic/src/tests.rs | 31 +- frame/examples/basic/src/weights.rs | 90 +- frame/examples/offchain-worker/Cargo.toml | 2 +- frame/examples/offchain-worker/src/lib.rs | 5 +- frame/examples/offchain-worker/src/tests.rs | 48 +- frame/examples/parallel/Cargo.toml | 38 + frame/examples/parallel/README.md | 7 + frame/examples/parallel/src/lib.rs | 151 ++ frame/examples/parallel/src/tests.rs | 148 ++ frame/executive/Cargo.toml | 5 +- frame/executive/README.md | 2 +- frame/executive/src/lib.rs | 252 +- frame/fast-unstake/Cargo.toml | 66 - frame/fast-unstake/src/benchmarking.rs | 201 -- frame/fast-unstake/src/lib.rs | 495 ---- frame/fast-unstake/src/migrations.rs | 77 - frame/fast-unstake/src/mock.rs | 379 --- frame/fast-unstake/src/tests.rs | 1200 --------- frame/fast-unstake/src/types.rs | 41 - frame/fast-unstake/src/weights.rs | 212 -- frame/gilt/Cargo.toml | 2 +- frame/gilt/src/benchmarking.rs | 6 +- frame/gilt/src/lib.rs | 6 +- frame/gilt/src/mock.rs | 10 +- frame/gilt/src/tests.rs | 158 +- frame/gilt/src/weights.rs | 181 +- frame/grandpa/Cargo.toml | 2 +- frame/grandpa/src/default_weights.rs | 12 +- frame/grandpa/src/equivocation.rs | 4 +- frame/grandpa/src/lib.rs | 13 +- frame/grandpa/src/migrations/v4.rs | 4 +- frame/grandpa/src/mock.rs | 32 +- frame/grandpa/src/tests.rs | 24 +- 
frame/identity/Cargo.toml | 2 +- frame/identity/src/benchmarking.rs | 140 +- frame/identity/src/lib.rs | 45 +- frame/identity/src/tests.rs | 283 +- frame/identity/src/weights.rs | 500 ++-- frame/im-online/Cargo.toml | 1 - frame/im-online/src/lib.rs | 8 +- frame/im-online/src/mock.rs | 54 +- frame/im-online/src/tests.rs | 59 +- frame/im-online/src/weights.rs | 43 +- frame/indices/Cargo.toml | 1 - frame/indices/src/benchmarking.rs | 6 +- frame/indices/src/lib.rs | 9 +- frame/indices/src/mock.rs | 12 +- frame/indices/src/tests.rs | 13 +- frame/indices/src/weights.rs | 79 +- frame/lottery/Cargo.toml | 1 - frame/lottery/src/benchmarking.rs | 6 +- frame/lottery/src/lib.rs | 30 +- frame/lottery/src/mock.rs | 12 +- frame/lottery/src/tests.rs | 178 +- frame/lottery/src/weights.rs | 99 +- frame/membership/Cargo.toml | 2 +- frame/membership/README.md | 4 +- frame/membership/src/lib.rs | 128 +- frame/membership/src/migrations/v4.rs | 4 +- frame/membership/src/weights.rs | 173 +- frame/merkle-mountain-range/Cargo.toml | 6 +- frame/merkle-mountain-range/rpc/Cargo.toml | 3 +- frame/merkle-mountain-range/rpc/src/lib.rs | 267 +- .../merkle-mountain-range/src/benchmarking.rs | 2 - .../src/default_weights.rs | 4 +- frame/merkle-mountain-range/src/lib.rs | 169 +- frame/merkle-mountain-range/src/mmr/mmr.rs | 16 +- frame/merkle-mountain-range/src/mmr/mod.rs | 4 +- .../merkle-mountain-range/src/mmr/storage.rs | 99 +- frame/merkle-mountain-range/src/mock.rs | 19 +- frame/merkle-mountain-range/src/tests.rs | 410 +-- frame/multisig/Cargo.toml | 4 - frame/multisig/src/benchmarking.rs | 148 +- frame/multisig/src/lib.rs | 204 +- frame/multisig/src/migrations.rs | 86 - frame/multisig/src/tests.rs | 588 +++-- frame/multisig/src/weights.rs | 282 +- frame/nicks/src/lib.rs | 67 +- frame/node-authorization/src/lib.rs | 19 +- frame/node-authorization/src/mock.rs | 8 +- frame/node-authorization/src/tests.rs | 100 +- frame/node-authorization/src/weights.rs | 18 +- frame/nomination-pools/Cargo.toml | 13 
+- .../nomination-pools/benchmarking/Cargo.toml | 25 +- .../nomination-pools/benchmarking/src/lib.rs | 175 +- .../nomination-pools/benchmarking/src/mock.rs | 28 +- frame/nomination-pools/fuzzer/Cargo.toml | 33 - frame/nomination-pools/fuzzer/src/call.rs | 353 --- frame/nomination-pools/src/lib.rs | 505 ++-- frame/nomination-pools/src/migration.rs | 80 +- frame/nomination-pools/src/mock.rs | 139 +- frame/nomination-pools/src/tests.rs | 988 +++---- frame/nomination-pools/src/weights.rs | 363 ++- .../nomination-pools/test-staking/src/lib.rs | 207 +- .../nomination-pools/test-staking/src/mock.rs | 34 +- frame/offences/benchmarking/Cargo.toml | 12 +- frame/offences/benchmarking/src/lib.rs | 23 +- frame/offences/benchmarking/src/mock.rs | 35 +- frame/offences/src/lib.rs | 5 +- frame/offences/src/mock.rs | 29 +- frame/offences/src/tests.rs | 8 +- frame/preimage/Cargo.toml | 8 +- frame/preimage/src/benchmarking.rs | 44 +- frame/preimage/src/lib.rs | 248 +- frame/preimage/src/migration.rs | 263 -- frame/preimage/src/mock.rs | 13 +- frame/preimage/src/tests.rs | 353 +-- frame/preimage/src/weights.rs | 217 +- frame/proxy/Cargo.toml | 1 - frame/proxy/src/benchmarking.rs | 76 +- frame/proxy/src/lib.rs | 173 +- frame/proxy/src/tests.rs | 257 +- frame/proxy/src/weights.rs | 297 +-- frame/randomness-collective-flip/src/lib.rs | 10 +- frame/ranked-collective/Cargo.toml | 2 +- frame/ranked-collective/src/benchmarking.rs | 29 +- frame/ranked-collective/src/lib.rs | 214 +- frame/ranked-collective/src/tests.rs | 267 +- frame/ranked-collective/src/weights.rs | 162 +- frame/recovery/Cargo.toml | 2 +- frame/recovery/src/benchmarking.rs | 32 +- frame/recovery/src/lib.rs | 57 +- frame/recovery/src/mock.rs | 14 +- frame/recovery/src/tests.rs | 228 +- frame/recovery/src/weights.rs | 177 +- frame/referenda/Cargo.toml | 2 +- frame/referenda/src/benchmarking.rs | 480 ++-- frame/referenda/src/branch.rs | 16 +- frame/referenda/src/lib.rs | 167 +- frame/referenda/src/mock.rs | 74 +- 
frame/referenda/src/tests.rs | 120 +- frame/referenda/src/types.rs | 52 +- frame/referenda/src/weights.rs | 389 ++- frame/remark/Cargo.toml | 2 - frame/remark/src/benchmarking.rs | 4 +- frame/remark/src/lib.rs | 2 +- frame/remark/src/mock.rs | 8 +- frame/remark/src/tests.rs | 9 +- frame/remark/src/weights.rs | 29 +- frame/root-offences/Cargo.toml | 51 - frame/root-offences/README.md | 5 - frame/root-offences/src/lib.rs | 131 - frame/root-offences/src/mock.rs | 359 --- frame/root-offences/src/tests.rs | 94 - frame/scheduler/Cargo.toml | 2 +- frame/scheduler/src/benchmarking.rs | 316 ++- frame/scheduler/src/lib.rs | 1015 +++---- frame/scheduler/src/migration.rs | 402 --- frame/scheduler/src/mock.rs | 103 +- frame/scheduler/src/tests.rs | 1388 ++-------- frame/scheduler/src/weights.rs | 397 ++- frame/scored-pool/README.md | 2 +- frame/scored-pool/src/lib.rs | 78 +- frame/scored-pool/src/mock.rs | 54 +- frame/scored-pool/src/tests.rs | 84 +- frame/session/benchmarking/Cargo.toml | 6 +- frame/session/benchmarking/src/lib.rs | 1 - frame/session/benchmarking/src/mock.rs | 19 +- frame/session/src/historical/mod.rs | 45 +- frame/session/src/historical/offchain.rs | 11 +- frame/session/src/lib.rs | 4 +- frame/session/src/migrations/v1.rs | 4 +- frame/session/src/mock.rs | 97 +- frame/session/src/tests.rs | 38 +- frame/session/src/weights.rs | 37 +- frame/society/src/lib.rs | 34 +- frame/society/src/mock.rs | 12 +- frame/society/src/tests.rs | 274 +- frame/staking/Cargo.toml | 11 +- frame/staking/reward-curve/src/lib.rs | 2 +- frame/staking/src/benchmarking.rs | 53 +- frame/staking/src/lib.rs | 72 +- frame/staking/src/migrations.rs | 170 +- frame/staking/src/mock.rs | 178 +- frame/staking/src/pallet/impls.rs | 457 +--- frame/staking/src/pallet/mod.rs | 319 +-- frame/staking/src/slashing.rs | 9 +- frame/staking/src/testing_utils.rs | 27 +- frame/staking/src/tests.rs | 1213 +++------ frame/staking/src/weights.rs | 814 +++--- frame/state-trie-migration/Cargo.toml | 4 +- 
frame/state-trie-migration/src/lib.rs | 207 +- frame/state-trie-migration/src/weights.rs | 97 +- frame/sudo/README.md | 2 +- frame/sudo/src/extension.rs | 107 - frame/sudo/src/lib.rs | 36 +- frame/sudo/src/mock.rs | 33 +- frame/sudo/src/tests.rs | 106 +- frame/support/Cargo.toml | 20 +- frame/support/procedural/Cargo.toml | 2 - .../src/construct_runtime/expand/call.rs | 73 +- .../src/construct_runtime/expand/config.rs | 21 +- .../src/construct_runtime/expand/event.rs | 51 +- .../src/construct_runtime/expand/inherent.rs | 55 +- .../src/construct_runtime/expand/metadata.rs | 10 - .../src/construct_runtime/expand/origin.rs | 112 +- .../src/construct_runtime/expand/unsigned.rs | 23 +- .../procedural/src/construct_runtime/mod.rs | 218 +- .../procedural/src/construct_runtime/parse.rs | 31 +- frame/support/procedural/src/lib.rs | 710 +---- .../procedural/src/pallet/expand/call.rs | 58 +- .../procedural/src/pallet/expand/error.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 11 +- .../src/pallet/expand/genesis_build.rs | 2 +- .../procedural/src/pallet/expand/hooks.rs | 40 +- .../procedural/src/pallet/expand/mod.rs | 2 +- .../src/pallet/expand/pallet_struct.rs | 27 +- .../procedural/src/pallet/expand/storage.rs | 217 +- frame/support/procedural/src/pallet/mod.rs | 22 +- .../procedural/src/pallet/parse/call.rs | 32 +- .../procedural/src/pallet/parse/config.rs | 24 +- .../procedural/src/pallet/parse/mod.rs | 44 +- .../procedural/src/pallet/parse/storage.rs | 138 +- frame/support/procedural/src/storage/mod.rs | 1 - frame/support/src/dispatch.rs | 936 +------ frame/support/src/error.rs | 2 +- frame/support/src/lib.rs | 1083 +++----- frame/support/src/migrations.rs | 7 +- .../src/storage/generator/double_map.rs | 4 +- frame/support/src/storage/generator/map.rs | 4 +- frame/support/src/storage/generator/mod.rs | 6 +- frame/support/src/storage/generator/nmap.rs | 4 +- frame/support/src/storage/mod.rs | 25 +- .../support/src/storage/storage_noop_guard.rs | 114 - 
.../support/src/storage/types/counted_map.rs | 21 +- frame/support/src/storage/types/double_map.rs | 19 +- frame/support/src/storage/types/map.rs | 27 +- frame/support/src/storage/types/mod.rs | 32 +- frame/support/src/traits.rs | 33 +- frame/support/src/traits/dispatch.rs | 46 +- frame/support/src/traits/filter.rs | 11 + frame/support/src/traits/hooks.rs | 360 +-- frame/support/src/traits/members.rs | 9 +- frame/support/src/traits/metadata.rs | 41 +- frame/support/src/traits/misc.rs | 334 +-- frame/support/src/traits/preimages.rs | 317 --- frame/support/src/traits/schedule.rs | 127 +- frame/support/src/traits/storage.rs | 33 +- frame/support/src/traits/tokens/currency.rs | 4 +- .../src/traits/tokens/fungible/balanced.rs | 7 +- frame/support/src/traits/tokens/fungibles.rs | 1 - .../src/traits/tokens/fungibles/approvals.rs | 2 +- .../src/traits/tokens/fungibles/balanced.rs | 9 +- .../src/traits/tokens/fungibles/roles.rs | 29 - .../support/src/traits/tokens/nonfungible.rs | 18 +- .../support/src/traits/tokens/nonfungibles.rs | 17 +- frame/support/src/traits/try_runtime.rs | 138 - frame/support/src/traits/voting.rs | 13 - frame/support/src/weights.rs | 1019 ++++++- frame/support/src/weights/block_weights.rs | 44 +- .../support/src/weights/extrinsic_weights.rs | 44 +- frame/support/src/weights/paritydb_weights.rs | 21 +- frame/support/src/weights/rocksdb_weights.rs | 21 +- frame/support/test/Cargo.toml | 8 +- frame/support/test/compile_pass/src/lib.rs | 8 +- frame/support/test/src/lib.rs | 4 +- frame/support/test/tests/construct_runtime.rs | 280 +- .../test/tests/construct_runtime_ui.rs | 6 +- .../both_use_and_excluded_parts.rs | 2 +- .../both_use_and_excluded_parts.stderr | 25 +- .../exclude_undefined_part.rs | 2 +- .../exclude_undefined_part.stderr | 25 +- .../feature_gated_system_pallet.rs | 14 - .../feature_gated_system_pallet.stderr | 5 - .../invalid_meta_literal.rs | 15 - .../invalid_meta_literal.stderr | 6 - .../no_std_genesis_config.rs | 8 +- 
.../no_std_genesis_config.stderr | 15 +- .../old_unsupported_pallet_decl.rs | 2 +- .../pallet_error_too_large.rs | 8 +- .../pallet_error_too_large.stderr | 2 +- .../undefined_call_part.rs | 8 +- .../undefined_call_part.stderr | 2 +- .../undefined_event_part.rs | 8 +- .../undefined_event_part.stderr | 48 +- .../undefined_genesis_config_part.rs | 8 +- .../undefined_genesis_config_part.stderr | 15 +- .../undefined_inherent_part.rs | 8 +- .../undefined_inherent_part.stderr | 12 +- .../undefined_origin_part.rs | 8 +- .../undefined_origin_part.stderr | 54 +- .../undefined_validate_unsigned_part.rs | 8 +- .../undefined_validate_unsigned_part.stderr | 23 +- .../unsupported_meta_structure.rs | 15 - .../unsupported_meta_structure.stderr | 6 - .../unsupported_pallet_attr.rs | 15 - .../unsupported_pallet_attr.stderr | 5 - .../use_undefined_part.rs | 2 +- .../use_undefined_part.stderr | 25 +- ...served_keyword_two_times_integrity_test.rs | 2 +- ...ed_keyword_two_times_integrity_test.stderr | 4 +- ...eserved_keyword_two_times_on_initialize.rs | 2 +- ...ved_keyword_two_times_on_initialize.stderr | 6 +- frame/support/test/tests/decl_storage.rs | 16 +- .../tests/decl_storage_ui/config_duplicate.rs | 2 +- .../decl_storage_ui/config_get_duplicate.rs | 2 +- .../tests/decl_storage_ui/get_duplicate.rs | 2 +- .../tests/derive_no_bound_ui/debug.stderr | 4 +- frame/support/test/tests/final_keys.rs | 4 +- frame/support/test/tests/genesisconfig.rs | 4 +- frame/support/test/tests/instance.rs | 46 +- frame/support/test/tests/issue2219.rs | 10 +- frame/support/test/tests/origin.rs | 60 +- frame/support/test/tests/pallet.rs | 428 +-- .../test/tests/pallet_compatibility.rs | 30 +- .../tests/pallet_compatibility_instance.rs | 39 +- frame/support/test/tests/pallet_instance.rs | 198 +- .../tests/pallet_ui/attr_non_empty.stderr | 4 +- .../call_argument_invalid_bound.stderr | 4 +- .../call_argument_invalid_bound_2.stderr | 8 +- .../call_argument_invalid_bound_3.stderr | 4 +- 
.../tests/pallet_ui/dev_mode_without_arg.rs | 33 - .../pallet_ui/dev_mode_without_arg.stderr | 11 - .../dev_mode_without_arg_max_encoded_len.rs | 34 - ...ev_mode_without_arg_max_encoded_len.stderr | 17 - .../tests/pallet_ui/event_field_not_member.rs | 2 +- .../pallet_ui/event_field_not_member.stderr | 4 +- .../tests/pallet_ui/event_not_in_trait.stderr | 2 +- .../pallet_ui/event_type_invalid_bound.rs | 2 +- .../pallet_ui/event_type_invalid_bound.stderr | 4 +- .../pallet_ui/event_type_invalid_bound_2.rs | 2 +- .../event_type_invalid_bound_2.stderr | 4 +- .../tests/pallet_ui/hooks_invalid_item.stderr | 6 +- .../tests/pallet_ui/pallet_invalid_arg.rs | 4 - .../tests/pallet_ui/pallet_invalid_arg.stderr | 5 - .../tests/pallet_ui/pass/dev_mode_valid.rs | 35 - ...age_ensure_span_are_ok_on_wrong_gen.stderr | 46 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 46 +- .../pallet_ui/storage_info_unsatisfied.stderr | 4 +- .../storage_info_unsatisfied_nmap.stderr | 6 +- .../storage_invalid_attribute.stderr | 2 +- .../pallet_ui/storage_not_storage_type.stderr | 2 +- .../storage_result_query_missing_generics.rs | 21 - ...orage_result_query_missing_generics.stderr | 15 - ...storage_result_query_multiple_type_args.rs | 23 - ...age_result_query_multiple_type_args.stderr | 5 - ...ge_result_query_no_defined_pallet_error.rs | 16 - ...esult_query_no_defined_pallet_error.stderr | 5 - ...age_result_query_parenthesized_generics.rs | 22 - ...result_query_parenthesized_generics.stderr | 5 - ...storage_result_query_wrong_generic_kind.rs | 22 - ...age_result_query_wrong_generic_kind.stderr | 5 - .../tests/pallet_with_name_trait_is_valid.rs | 16 +- frame/support/test/tests/storage_layers.rs | 31 +- .../support/test/tests/storage_transaction.rs | 4 +- frame/support/test/tests/system.rs | 12 +- frame/system/Cargo.toml | 2 - frame/system/benches/bench.rs | 17 +- frame/system/benchmarking/Cargo.toml | 6 - frame/system/benchmarking/src/lib.rs | 53 +- frame/system/benchmarking/src/mock.rs | 6 +- 
frame/system/src/extensions/check_genesis.rs | 2 +- .../system/src/extensions/check_mortality.rs | 14 +- .../src/extensions/check_non_zero_sender.rs | 6 +- frame/system/src/extensions/check_nonce.rs | 6 +- .../src/extensions/check_spec_version.rs | 2 +- .../system/src/extensions/check_tx_version.rs | 2 +- frame/system/src/extensions/check_weight.rs | 213 +- frame/system/src/lib.rs | 92 +- frame/system/src/limits.rs | 40 +- frame/system/src/migrations/mod.rs | 6 +- frame/system/src/mock.rs | 24 +- frame/system/src/mocking.rs | 2 +- frame/system/src/offchain.rs | 8 +- frame/system/src/tests.rs | 336 +-- frame/system/src/weights.rs | 113 +- frame/timestamp/Cargo.toml | 3 +- frame/timestamp/src/lib.rs | 2 - frame/timestamp/src/mock.rs | 19 +- frame/timestamp/src/tests.rs | 11 +- frame/timestamp/src/weights.rs | 29 +- frame/tips/Cargo.toml | 1 - frame/tips/src/benchmarking.rs | 21 +- frame/tips/src/lib.rs | 12 +- frame/tips/src/migrations/v4.rs | 4 +- frame/tips/src/tests.rs | 158 +- frame/tips/src/weights.rs | 153 +- frame/transaction-payment/Cargo.toml | 2 +- .../asset-tx-payment/Cargo.toml | 3 +- .../asset-tx-payment/src/lib.rs | 15 +- .../asset-tx-payment/src/payment.rs | 16 +- .../asset-tx-payment/src/tests.rs | 127 +- frame/transaction-payment/rpc/Cargo.toml | 1 - .../rpc/runtime-api/Cargo.toml | 2 - .../rpc/runtime-api/src/lib.rs | 16 - frame/transaction-payment/rpc/src/lib.rs | 50 +- frame/transaction-payment/src/lib.rs | 539 ++-- frame/transaction-payment/src/payment.rs | 16 +- frame/transaction-payment/src/types.rs | 24 +- frame/transaction-storage/Cargo.toml | 7 +- frame/transaction-storage/src/benchmarking.rs | 131 +- frame/transaction-storage/src/lib.rs | 10 +- frame/transaction-storage/src/mock.rs | 14 +- frame/transaction-storage/src/tests.rs | 18 +- frame/transaction-storage/src/weights.rs | 71 +- frame/treasury/Cargo.toml | 1 - frame/treasury/src/benchmarking.rs | 13 +- frame/treasury/src/lib.rs | 18 +- frame/treasury/src/tests.rs | 135 +- 
frame/treasury/src/weights.rs | 121 +- frame/try-runtime/Cargo.toml | 7 +- frame/try-runtime/src/lib.rs | 4 +- frame/uniques/Cargo.toml | 2 +- frame/uniques/src/benchmarking.rs | 40 +- frame/uniques/src/functions.rs | 27 +- frame/uniques/src/impl_nonfungibles.rs | 23 +- frame/uniques/src/lib.rs | 104 +- frame/uniques/src/migration.rs | 7 +- frame/uniques/src/mock.rs | 10 +- frame/uniques/src/tests.rs | 615 ++--- frame/uniques/src/weights.rs | 456 ++-- frame/utility/Cargo.toml | 3 - frame/utility/src/benchmarking.rs | 14 +- frame/utility/src/lib.rs | 101 +- frame/utility/src/tests.rs | 454 +--- frame/utility/src/weights.rs | 63 +- frame/vesting/Cargo.toml | 1 - frame/vesting/README.md | 3 +- frame/vesting/src/benchmarking.rs | 22 +- frame/vesting/src/lib.rs | 31 +- frame/vesting/src/mock.rs | 17 +- frame/vesting/src/weights.rs | 281 +- frame/whitelist/Cargo.toml | 1 - frame/whitelist/src/benchmarking.rs | 12 +- frame/whitelist/src/lib.rs | 27 +- frame/whitelist/src/mock.rs | 17 +- frame/whitelist/src/tests.rs | 79 +- frame/whitelist/src/weights.rs | 83 +- primitives/api/Cargo.toml | 6 +- primitives/api/proc-macro/Cargo.toml | 2 +- primitives/api/proc-macro/src/common.rs | 41 - .../api/proc-macro/src/decl_runtime_apis.rs | 701 +++-- .../api/proc-macro/src/impl_runtime_apis.rs | 344 +-- primitives/api/proc-macro/src/lib.rs | 1 - .../proc-macro/src/mock_impl_runtime_apis.rs | 138 +- primitives/api/proc-macro/src/utils.rs | 43 +- primitives/api/src/lib.rs | 112 +- primitives/api/test/benches/bench.rs | 12 +- primitives/api/test/tests/decl_and_impl.rs | 39 +- primitives/api/test/tests/runtime_calls.rs | 30 +- primitives/api/test/tests/trybuild.rs | 1 - .../ui/impl_incorrect_method_signature.stderr | 44 +- .../api/test/tests/ui/impl_missing_version.rs | 40 - .../test/tests/ui/impl_missing_version.stderr | 14 - ...pi_version_1.rs => invalid_api_version.rs} | 0 ...on_1.stderr => invalid_api_version.stderr} | 4 +- .../tests/ui/invalid_api_version_2.stderr | 4 +- 
.../tests/ui/invalid_api_version_3.stderr | 4 +- .../test/tests/ui/invalid_api_version_4.rs | 8 - .../tests/ui/invalid_api_version_4.stderr | 5 - .../ui/method_ver_lower_than_trait_ver.rs | 9 - .../ui/method_ver_lower_than_trait_ver.stderr | 11 - .../test/tests/ui/missing_versioned_method.rs | 39 - .../tests/ui/missing_versioned_method.stderr | 8 - .../missing_versioned_method_multiple_vers.rs | 42 - ...sing_versioned_method_multiple_vers.stderr | 8 - .../ui/mock_advanced_block_id_by_value.rs | 2 +- .../ui/mock_advanced_block_id_by_value.stderr | 4 +- .../tests/ui/mock_advanced_missing_blockid.rs | 2 +- .../ui/mock_advanced_missing_blockid.stderr | 4 +- .../tests/ui/mock_only_self_reference.stderr | 74 +- .../tests/ui/positive_cases/default_impls.rs | 41 - ...reference_in_impl_runtime_apis_call.stderr | 48 +- primitives/arithmetic/Cargo.toml | 2 +- primitives/arithmetic/fuzzer/Cargo.toml | 4 +- primitives/authorship/Cargo.toml | 2 +- primitives/beefy/Cargo.toml | 9 +- primitives/beefy/src/commitment.rs | 158 +- primitives/beefy/src/lib.rs | 82 +- primitives/beefy/src/mmr.rs | 112 +- primitives/beefy/src/payload.rs | 105 - primitives/beefy/src/witness.rs | 22 +- .../beefy/test-res/large-raw-commitment | Bin 44638 -> 0 bytes primitives/blockchain/Cargo.toml | 4 +- primitives/blockchain/src/backend.rs | 14 +- primitives/blockchain/src/header_metadata.rs | 13 +- primitives/consensus/aura/Cargo.toml | 2 +- primitives/consensus/aura/src/lib.rs | 2 +- primitives/consensus/babe/Cargo.toml | 2 +- primitives/consensus/babe/src/lib.rs | 33 +- primitives/consensus/common/Cargo.toml | 2 +- primitives/consensus/common/src/lib.rs | 77 +- primitives/core/Cargo.toml | 33 +- primitives/core/hashing/Cargo.toml | 3 +- primitives/core/src/crypto.rs | 53 +- primitives/core/src/defer.rs | 140 - primitives/core/src/ecdsa.rs | 29 +- primitives/core/src/ed25519.rs | 78 +- primitives/core/src/lib.rs | 319 +-- primitives/core/src/sr25519.rs | 52 +- primitives/core/src/testing.rs | 4 +- 
primitives/core/src/traits.rs | 15 +- primitives/core/src/uint.rs | 3 + primitives/database/Cargo.toml | 4 +- primitives/finality-grandpa/src/lib.rs | 63 +- primitives/inherents/Cargo.toml | 6 +- primitives/io/Cargo.toml | 11 +- primitives/io/src/lib.rs | 62 +- primitives/keystore/Cargo.toml | 4 +- primitives/merkle-mountain-range/Cargo.toml | 4 +- primitives/merkle-mountain-range/src/lib.rs | 113 +- primitives/npos-elections/fuzzer/Cargo.toml | 4 +- .../npos-elections/fuzzer/src/common.rs | 6 +- .../npos-elections/fuzzer/src/phragmen_pjr.rs | 8 +- .../npos-elections/fuzzer/src/reduce.rs | 2 +- primitives/npos-elections/src/lib.rs | 14 +- primitives/npos-elections/src/mock.rs | 16 +- primitives/npos-elections/src/pjr.rs | 2 +- primitives/npos-elections/src/tests.rs | 45 +- primitives/panic-handler/Cargo.toml | 2 +- primitives/rpc/Cargo.toml | 2 +- primitives/runtime-interface/Cargo.toml | 4 +- primitives/runtime-interface/src/lib.rs | 4 +- primitives/runtime/Cargo.toml | 7 +- primitives/{core => runtime}/src/bounded.rs | 0 .../src/bounded/bounded_btree_map.rs | 111 +- .../src/bounded/bounded_btree_set.rs | 4 +- .../src/bounded/bounded_vec.rs | 128 +- .../src/bounded/weak_bounded_vec.rs | 6 +- primitives/runtime/src/curve.rs | 2 +- .../runtime/src/generic/checked_extrinsic.rs | 9 +- primitives/runtime/src/generic/era.rs | 4 +- primitives/runtime/src/lib.rs | 123 +- .../runtime/src/offchain/storage_lock.rs | 14 +- primitives/runtime/src/testing.rs | 11 +- primitives/runtime/src/traits.rs | 220 +- .../runtime/src/transaction_validity.rs | 5 +- primitives/sandbox/Cargo.toml | 10 +- primitives/sandbox/src/embedded_executor.rs | 4 +- primitives/serializer/Cargo.toml | 2 +- primitives/staking/src/lib.rs | 141 +- primitives/staking/src/offence.rs | 7 +- primitives/state-machine/Cargo.toml | 5 +- primitives/state-machine/src/backend.rs | 32 +- primitives/state-machine/src/basic.rs | 200 +- .../state-machine/src/in_memory_backend.rs | 17 +- 
primitives/state-machine/src/lib.rs | 368 ++- .../src/overlayed_changes/changeset.rs | 31 +- .../src/overlayed_changes/mod.rs | 25 +- .../state-machine/src/proving_backend.rs | 611 +++++ primitives/state-machine/src/read_only.rs | 26 +- primitives/state-machine/src/testing.rs | 20 +- primitives/state-machine/src/trie_backend.rs | 982 +------ .../state-machine/src/trie_backend_essence.rs | 498 +--- primitives/storage/Cargo.toml | 2 +- primitives/storage/src/lib.rs | 16 +- primitives/tasks/Cargo.toml | 36 + primitives/tasks/README.md | 3 + primitives/tasks/src/async_externalities.rs | 212 ++ primitives/tasks/src/lib.rs | 257 ++ primitives/test-primitives/Cargo.toml | 2 +- primitives/timestamp/Cargo.toml | 2 +- primitives/timestamp/src/lib.rs | 25 +- .../transaction-storage-proof/Cargo.toml | 2 +- .../transaction-storage-proof/src/lib.rs | 3 +- primitives/trie/Cargo.toml | 22 +- primitives/trie/src/cache/mod.rs | 692 ----- primitives/trie/src/cache/shared_cache.rs | 677 ----- primitives/trie/src/error.rs | 21 +- primitives/trie/src/lib.rs | 226 +- primitives/trie/src/node_codec.rs | 50 +- primitives/trie/src/node_header.rs | 5 +- primitives/trie/src/recorder.rs | 302 --- primitives/trie/src/storage_proof.rs | 70 +- primitives/trie/src/trie_codec.rs | 18 +- primitives/trie/src/trie_stream.rs | 3 +- primitives/version/Cargo.toml | 4 +- primitives/wasm-interface/Cargo.toml | 4 +- primitives/weights/Cargo.toml | 40 - primitives/weights/src/lib.rs | 322 --- primitives/weights/src/weight_meter.rs | 176 -- primitives/weights/src/weight_v2.rs | 462 ---- scripts/ci/gitlab/pipeline/build.yml | 8 +- scripts/ci/gitlab/pipeline/check.yml | 14 +- scripts/ci/gitlab/pipeline/test.yml | 76 +- scripts/ci/node-template-release/Cargo.toml | 2 +- scripts/ci/node-template-release/src/main.rs | 4 +- scripts/run_all_benchmarks.sh | 2 - test-utils/client/Cargo.toml | 6 +- test-utils/client/src/client_ext.rs | 8 +- test-utils/client/src/lib.rs | 13 +- test-utils/runtime/Cargo.toml | 7 +- 
test-utils/runtime/client/src/lib.rs | 2 +- test-utils/runtime/src/genesismap.rs | 12 +- test-utils/runtime/src/lib.rs | 123 +- test-utils/runtime/src/system.rs | 116 +- .../runtime/transaction-pool/Cargo.toml | 2 +- .../runtime/transaction-pool/src/lib.rs | 24 +- utils/frame/benchmarking-cli/Cargo.toml | 13 +- .../frame/benchmarking-cli/src/block/bench.rs | 23 +- utils/frame/benchmarking-cli/src/block/cmd.rs | 18 +- .../benchmarking-cli/src/extrinsic/bench.rs | 14 +- .../benchmarking-cli/src/extrinsic/cmd.rs | 25 +- utils/frame/benchmarking-cli/src/lib.rs | 18 +- .../benchmarking-cli/src/machine/hardware.rs | 129 +- .../frame/benchmarking-cli/src/machine/mod.rs | 31 +- .../src/machine/reference_hardware.json | 20 +- .../benchmarking-cli/src/overhead/README.md | 59 +- .../benchmarking-cli/src/overhead/cmd.rs | 27 +- .../benchmarking-cli/src/overhead/template.rs | 9 - .../benchmarking-cli/src/overhead/weights.hbs | 48 +- .../benchmarking-cli/src/pallet/command.rs | 117 +- .../frame/benchmarking-cli/src/pallet/mod.rs | 66 +- .../benchmarking-cli/src/pallet/template.hbs | 13 +- .../benchmarking-cli/src/pallet/writer.rs | 58 +- .../benchmarking-cli/src/shared/README.md | 1 - .../frame/benchmarking-cli/src/shared/mod.rs | 8 +- .../benchmarking-cli/src/shared/stats.rs | 2 +- .../src/shared/weight_params.rs | 8 +- .../benchmarking-cli/src/storage/README.md | 1 - .../frame/benchmarking-cli/src/storage/cmd.rs | 68 +- .../benchmarking-cli/src/storage/read.rs | 52 +- .../benchmarking-cli/src/storage/template.rs | 16 +- .../benchmarking-cli/src/storage/weights.hbs | 35 +- .../benchmarking-cli/src/storage/write.rs | 199 +- utils/frame/frame-utilities-cli/Cargo.toml | 2 +- .../frame-utilities-cli/src/pallet_id.rs | 13 +- utils/frame/generate-bags/Cargo.toml | 4 +- .../generate-bags/node-runtime/Cargo.toml | 2 +- .../generate-bags/node-runtime/src/main.rs | 6 +- utils/frame/generate-bags/src/lib.rs | 15 - utils/frame/remote-externalities/Cargo.toml | 4 +- 
utils/frame/remote-externalities/src/lib.rs | 139 +- .../frame/remote-externalities/src/rpc_api.rs | 100 + utils/frame/rpc/client/Cargo.toml | 25 - utils/frame/rpc/client/src/lib.rs | 265 -- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 25 +- utils/frame/rpc/support/Cargo.toml | 2 - utils/frame/rpc/support/src/lib.rs | 94 +- utils/frame/try-runtime/cli/Cargo.toml | 14 +- .../cli/src/commands/execute_block.rs | 79 +- .../cli/src/commands/follow_chain.rs | 110 +- .../cli/src/commands/offchain_worker.rs | 26 +- .../cli/src/commands/on_runtime_upgrade.rs | 21 +- utils/frame/try-runtime/cli/src/lib.rs | 122 +- utils/frame/try-runtime/cli/src/parse.rs | 9 - utils/wasm-builder/Cargo.toml | 2 +- utils/wasm-builder/src/wasm_project.rs | 55 +- 1165 files changed, 39094 insertions(+), 67772 deletions(-) delete mode 100644 client/beefy/src/communication/mod.rs delete mode 100644 client/beefy/src/communication/peers.rs delete mode 100644 client/beefy/src/communication/request_response/incoming_requests_handler.rs delete mode 100644 client/beefy/src/communication/request_response/mod.rs delete mode 100644 client/beefy/src/communication/request_response/outgoing_requests_engine.rs rename client/beefy/src/{communication => }/gossip.rs (93%) rename client/beefy/src/{communication => }/notification.rs (71%) delete mode 100644 client/db/benches/state_access.rs delete mode 100644 client/db/src/record_stats_state.rs delete mode 100644 client/network/bitswap/Cargo.toml delete mode 100644 client/network/bitswap/src/lib.rs rename client/network/{bitswap => }/build.rs (100%) delete mode 100644 client/network/common/src/protocol.rs delete mode 100644 client/network/common/src/protocol/role.rs delete mode 100644 client/network/common/src/service.rs create mode 100644 client/network/src/bitswap.rs rename client/network/{common => }/src/error.rs (96%) rename client/network/{common => }/src/protocol/event.rs (76%) rename client/network/{bitswap 
=> }/src/schema.rs (97%) rename client/network/{bitswap => }/src/schema/bitswap.v1.2.0.proto (100%) rename client/network/{common => }/src/service/signature.rs (96%) rename client/network/src/service/{tests/service.rs => tests.rs} (50%) delete mode 100644 client/network/src/service/tests/chain_sync.rs delete mode 100644 client/network/src/service/tests/mod.rs rename client/network/{transactions/src/lib.rs => src/transactions.rs} (82%) rename client/network/{common => }/src/utils.rs (100%) delete mode 100644 client/network/sync/src/mock.rs delete mode 100644 client/network/sync/src/service/chain_sync.rs delete mode 100644 client/network/sync/src/service/mock.rs delete mode 100644 client/network/sync/src/service/mod.rs delete mode 100644 client/network/sync/src/service/network.rs delete mode 100644 client/network/sync/src/tests.rs delete mode 100644 client/network/transactions/Cargo.toml delete mode 100644 client/network/transactions/src/config.rs delete mode 100644 client/rpc-spec-v2/Cargo.toml delete mode 100644 client/rpc-spec-v2/README.md delete mode 100644 client/rpc-spec-v2/src/chain_spec/api.rs delete mode 100644 client/rpc-spec-v2/src/chain_spec/chain_spec.rs delete mode 100644 client/rpc-spec-v2/src/chain_spec/mod.rs delete mode 100644 client/rpc-spec-v2/src/chain_spec/tests.rs delete mode 100644 client/rpc-spec-v2/src/lib.rs delete mode 100644 client/rpc-spec-v2/src/transaction/api.rs delete mode 100644 client/rpc-spec-v2/src/transaction/error.rs delete mode 100644 client/rpc-spec-v2/src/transaction/event.rs delete mode 100644 client/rpc-spec-v2/src/transaction/mod.rs delete mode 100644 client/rpc-spec-v2/src/transaction/transaction.rs delete mode 100644 client/transaction-pool/src/enactment_state.rs delete mode 100644 frame/alliance/src/migration.rs rename frame/bags-list/remote-tests/src/{try_state.rs => sanity_check.rs} (83%) rename frame/contracts/{primitives => common}/Cargo.toml (70%) rename frame/contracts/{primitives => common}/README.md (100%) 
rename frame/contracts/{primitives => common}/src/lib.rs (72%) delete mode 100644 frame/contracts/fixtures/create_storage_and_call.wat delete mode 100644 frame/contracts/fixtures/delegate_call_simple.wat delete mode 100644 frame/contracts/fixtures/float_instruction.wat create mode 100644 frame/contracts/rpc/Cargo.toml create mode 100644 frame/contracts/rpc/README.md create mode 100644 frame/contracts/rpc/runtime-api/Cargo.toml create mode 100644 frame/contracts/rpc/runtime-api/README.md create mode 100644 frame/contracts/rpc/runtime-api/src/lib.rs create mode 100644 frame/contracts/rpc/src/lib.rs create mode 100644 frame/contracts/src/wasm/env_def/macros.rs delete mode 100644 frame/democracy/src/migrations.rs create mode 100644 frame/democracy/src/tests/preimage.rs delete mode 100644 frame/election-provider-multi-phase/src/migrations.rs create mode 100644 frame/examples/parallel/Cargo.toml create mode 100644 frame/examples/parallel/README.md create mode 100644 frame/examples/parallel/src/lib.rs create mode 100644 frame/examples/parallel/src/tests.rs delete mode 100644 frame/fast-unstake/Cargo.toml delete mode 100644 frame/fast-unstake/src/benchmarking.rs delete mode 100644 frame/fast-unstake/src/lib.rs delete mode 100644 frame/fast-unstake/src/migrations.rs delete mode 100644 frame/fast-unstake/src/mock.rs delete mode 100644 frame/fast-unstake/src/tests.rs delete mode 100644 frame/fast-unstake/src/types.rs delete mode 100644 frame/fast-unstake/src/weights.rs delete mode 100644 frame/multisig/src/migrations.rs delete mode 100644 frame/nomination-pools/fuzzer/Cargo.toml delete mode 100644 frame/nomination-pools/fuzzer/src/call.rs delete mode 100644 frame/preimage/src/migration.rs delete mode 100644 frame/root-offences/Cargo.toml delete mode 100644 frame/root-offences/README.md delete mode 100644 frame/root-offences/src/lib.rs delete mode 100644 frame/root-offences/src/mock.rs delete mode 100644 frame/root-offences/src/tests.rs delete mode 100644 
frame/scheduler/src/migration.rs delete mode 100644 frame/sudo/src/extension.rs delete mode 100644 frame/support/src/storage/storage_noop_guard.rs delete mode 100644 frame/support/src/traits/preimages.rs delete mode 100644 frame/support/src/traits/tokens/fungibles/roles.rs delete mode 100644 frame/support/src/traits/try_runtime.rs delete mode 100644 frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs delete mode 100644 frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr delete mode 100644 frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs delete mode 100644 frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr delete mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs delete mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr delete mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs delete mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr delete mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs delete mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr delete mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs delete mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr delete mode 100644 frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs delete mode 100644 frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr delete mode 100644 frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs delete mode 100644 
frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs delete mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr delete mode 100644 primitives/api/proc-macro/src/common.rs delete mode 100644 primitives/api/test/tests/ui/impl_missing_version.rs delete mode 100644 primitives/api/test/tests/ui/impl_missing_version.stderr rename primitives/api/test/tests/ui/{invalid_api_version_1.rs => invalid_api_version.rs} (100%) rename primitives/api/test/tests/ui/{invalid_api_version_1.stderr => invalid_api_version.stderr} (65%) delete mode 100644 primitives/api/test/tests/ui/invalid_api_version_4.rs delete mode 100644 primitives/api/test/tests/ui/invalid_api_version_4.stderr delete mode 100644 primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs delete mode 100644 primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr delete mode 100644 primitives/api/test/tests/ui/missing_versioned_method.rs delete mode 100644 primitives/api/test/tests/ui/missing_versioned_method.stderr delete mode 100644 primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs delete mode 100644 primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr delete mode 100644 primitives/api/test/tests/ui/positive_cases/default_impls.rs delete mode 100644 primitives/beefy/src/payload.rs delete mode 100644 primitives/beefy/test-res/large-raw-commitment delete mode 100644 
primitives/core/src/defer.rs rename primitives/{core => runtime}/src/bounded.rs (100%) rename primitives/{core => runtime}/src/bounded/bounded_btree_map.rs (81%) rename primitives/{core => runtime}/src/bounded/bounded_btree_set.rs (99%) rename primitives/{core => runtime}/src/bounded/bounded_vec.rs (90%) rename primitives/{core => runtime}/src/bounded/weak_bounded_vec.rs (99%) create mode 100644 primitives/state-machine/src/proving_backend.rs create mode 100644 primitives/tasks/Cargo.toml create mode 100644 primitives/tasks/README.md create mode 100644 primitives/tasks/src/async_externalities.rs create mode 100644 primitives/tasks/src/lib.rs delete mode 100644 primitives/trie/src/cache/mod.rs delete mode 100644 primitives/trie/src/cache/shared_cache.rs delete mode 100644 primitives/trie/src/recorder.rs delete mode 100644 primitives/weights/Cargo.toml delete mode 100644 primitives/weights/src/lib.rs delete mode 100644 primitives/weights/src/weight_meter.rs delete mode 100644 primitives/weights/src/weight_v2.rs create mode 100644 utils/frame/remote-externalities/src/rpc_api.rs delete mode 100644 utils/frame/rpc/client/Cargo.toml delete mode 100644 utils/frame/rpc/client/src/lib.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index 66b28b3485d86..de299a90971e4 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -12,21 +12,22 @@ rustflags = [ "-Aclippy::if-same-then-else", "-Aclippy::clone-double-ref", "-Dclippy::complexity", + "-Aclippy::clone_on_copy", # Too common + "-Aclippy::needless_lifetimes", # Backward compat? 
"-Aclippy::zero-prefixed-literal", # 00_1000_000 "-Aclippy::type_complexity", # raison d'etre "-Aclippy::nonminimal-bool", # maybe "-Aclippy::borrowed-box", # Reasonable to fix this one "-Aclippy::too-many-arguments", # (Turning this on would lead to) "-Aclippy::unnecessary_cast", # Types may change - "-Aclippy::identity-op", # One case where we do 0 + + "-Aclippy::identity-op", # One case where we do 0 + "-Aclippy::useless_conversion", # Types may change "-Aclippy::unit_arg", # styalistic. "-Aclippy::option-map-unit-fn", # styalistic - "-Aclippy::bind_instead_of_map", # styalistic + "-Aclippy::bind_instead_of_map", # styalistic "-Aclippy::erasing_op", # E.g. 0 * DOLLARS "-Aclippy::eq_op", # In tests we test equality. "-Aclippy::while_immutable_condition", # false positives "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives - "-Aclippy::stable_sort_primitive", # prefer stable sort ] diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml index 2633bf55f0789..cd889b5941989 100644 --- a/.github/workflows/auto-label-issues.yml +++ b/.github/workflows/auto-label-issues.yml @@ -8,10 +8,10 @@ on: jobs: label-new-issues: - runs-on: ubuntu-latest + runs-on: ubuntu-latest steps: - name: Label drafts - uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 + uses: andymckay/labeler@master if: github.event.issue.author_association == 'NONE' with: add-labels: 'Z0-unconfirmed' diff --git a/.github/workflows/auto-label-prs.yml b/.github/workflows/auto-label-prs.yml index 50539b80b98b7..f0b8e9b343e29 100644 --- a/.github/workflows/auto-label-prs.yml +++ b/.github/workflows/auto-label-prs.yml @@ -8,13 +8,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Label drafts - uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 + uses: andymckay/labeler@master if: github.event.pull_request.draft == true with: add-labels: 'A3-inprogress' remove-labels: 
'A0-pleasereview' - name: Label PRs - uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 + uses: andymckay/labeler@master if: github.event.pull_request.draft == false && ! contains(github.event.pull_request.labels.*.name, 'A2-insubstantial') with: add-labels: 'A0-pleasereview' diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml index e1387f6da13f7..4b6b9166be2df 100644 --- a/.github/workflows/md-link-check.yml +++ b/.github/workflows/md-link-check.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: gaurav-nelson/github-action-markdown-link-check@0a51127e9955b855a9bbfa1ff5577f1d1338c9a5 # 1.0.14 + - uses: gaurav-nelson/github-action-markdown-link-check@9710f0fec812ce0a3b98bef4c9d842fc1f39d976 # v1.0.13 with: use-quiet-mode: 'yes' config-file: '.github/workflows/mlc_config.json' diff --git a/.github/workflows/monthly-tag.yml b/.github/workflows/monthly-tag.yml index 6bec03d27e7be..ade8bd4717c39 100644 --- a/.github/workflows/monthly-tag.yml +++ b/.github/workflows/monthly-tag.yml @@ -32,7 +32,7 @@ jobs: ./scripts/ci/github/generate_changelog.sh ${{ steps.tags.outputs.old }} >> Changelog.md - name: Release snapshot id: release-snapshot - uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 latest version, repo archived + uses: actions/create-release@latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/release-tagging.yml b/.github/workflows/release-tagging.yml index f7fa913c69709..c55fc13a626e0 100644 --- a/.github/workflows/release-tagging.yml +++ b/.github/workflows/release-tagging.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Set Git tag - uses: s3krit/walking-tag-action@d04f7a53b72ceda4e20283736ce3627011275178 # latest version from master + uses: s3krit/walking-tag-action@master with: TAG_NAME: release TAG_MESSAGE: Latest release diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 
9053e39eb59bf..9608d88e9554d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -41,7 +41,7 @@ workflow: - if: $CI_COMMIT_TAG - if: $CI_COMMIT_BRANCH -variables: +variables: &default-vars GIT_STRATEGY: fetch GIT_DEPTH: 100 CARGO_INCREMENTAL: 0 @@ -82,31 +82,18 @@ default: tags: - kubernetes-parity-build -.rust-info-script: +.rust-info-script: &rust-info-script script: - rustup show - cargo --version - rustup +nightly show - cargo +nightly --version -.pipeline-stopper-vars: - script: - - echo "Collecting env variables for the cancel-pipeline job" - - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env - - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env - - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env - -.pipeline-stopper-artifacts: - artifacts: - reports: - dotenv: pipeline-stopper.env - .docker-env: image: "${CI_IMAGE}" before_script: - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] - - !reference [.pipeline-stopper-vars, script] after_script: - !reference [.rusty-cachier, after_script] tags: @@ -131,18 +118,6 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -# handle the specific case where benches could store incorrect bench data because of the downstream staging runs -# exclude cargo-check-benches from such runs -.test-refs-check-benches: - rules: - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_IMAGE =~ /staging$/ - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - .test-refs-no-trigger: rules: - if: $CI_PIPELINE_SOURCE == "pipeline" @@ -210,7 +185,6 @@ skip-if-draft: - echo "Ref is ${CI_COMMIT_REF_NAME}" - echo "pipeline source is ${CI_PIPELINE_SOURCE}" - ./scripts/ci/gitlab/skip_if_draft.sh - allow_failure: true include: # check jobs @@ -272,52 +246,35 @@ rusty-cachier-notify: when: on_failure variables: PROJECT_ID: "${CI_PROJECT_ID}" - PROJECT_NAME: "${CI_PROJECT_NAME}" PIPELINE_ID: "${CI_PIPELINE_ID}" - FAILED_JOB_URL: "${FAILED_JOB_URL}" - FAILED_JOB_NAME: "${FAILED_JOB_NAME}" - PR_NUM: "${PR_NUM}" - trigger: - project: "parity/infrastructure/ci_cd/pipeline-stopper" - -# need to copy jobs this way because otherwise gitlab will wait -# for all 3 jobs to finish instead of cancelling if one fails -cancel-pipeline-test-linux-stable1: - extends: .cancel-pipeline-template - needs: - - job: "test-linux-stable 1/3" + trigger: "parity/infrastructure/ci_cd/pipeline-stopper" -cancel-pipeline-test-linux-stable2: +cancel-pipeline-test-linux-stable: extends: .cancel-pipeline-template needs: - - job: "test-linux-stable 2/3" - -cancel-pipeline-test-linux-stable3: - extends: .cancel-pipeline-template - needs: - - job: "test-linux-stable 3/3" - -cancel-pipeline-cargo-check-benches1: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-benches 1/2" - -cancel-pipeline-cargo-check-benches2: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-benches 2/2" + - job: test-linux-stable + artifacts: false cancel-pipeline-test-linux-stable-int: extends: .cancel-pipeline-template needs: - job: test-linux-stable-int + artifacts: false cancel-pipeline-cargo-check-subkey: extends: .cancel-pipeline-template needs: - job: cargo-check-subkey + artifacts: false + +cancel-pipeline-cargo-check-benches: + extends: .cancel-pipeline-template + needs: + - job: cargo-check-benches + artifacts: false cancel-pipeline-check-tracing: extends: .cancel-pipeline-template needs: - job: check-tracing + artifacts: false diff 
--git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 360279129980f..593da06f4a7c0 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -1,4 +1,20 @@ -{{header}} +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //! Autogenerated weights for {{pallet}} //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} @@ -48,23 +64,22 @@ impl WeightInfo for SubstrateWeight { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. 
- Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) + ({{underscore benchmark.base_weight}} as Weight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as u64).saturating_mul({{cw.name}} as u64)) + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as u64)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as u64).saturating_mul({{cr.name}} as u64))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as u64)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as u64).saturating_mul({{cw.name}} as u64))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) {{/each}} } {{/each}} @@ -84,23 +99,22 @@ impl WeightInfo for () { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. 
- Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) + ({{underscore benchmark.base_weight}} as Weight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as u64).saturating_mul({{cw.name}} as u64)) + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as u64)) + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as u64).saturating_mul({{cr.name}} as u64))) + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as u64)) + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as u64).saturating_mul({{cw.name}} as u64))) + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) {{/each}} } {{/each}} diff --git a/Cargo.lock b/Cargo.lock index c2ca7686a0f92..070fc67e59d21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,30 +29,30 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" -version = "0.4.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" +checksum = "6e3e798aa0c8239776f54415bc06f3d74b1850f3f830b45c35cfc80556973f70" dependencies = [ "generic-array 0.14.4", ] [[package]] name = "aes" -version = "0.7.5" +version = "0.7.4" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" +checksum = "495ee669413bfbe9e8cace80f4d3d78e6d8c8d99579f97fb93bde351b185f2d4" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher", - "cpufeatures", + "cpufeatures 0.1.5", "opaque-debug 0.3.0", ] [[package]] name = "aes-gcm" -version = "0.9.4" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" +checksum = "b2a930fd487faaa92a30afa92cc9dd1526a5cff67124abbbb1c617ce070f4dcf" dependencies = [ "aead", "aes", @@ -93,9 +93,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" [[package]] name = "approx" @@ -112,18 +112,21 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" -[[package]] -name = "array-bytes" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a913633b0c922e6b745072795f50d90ebea78ba31a57e2ac8c2fc7b50950949" - [[package]] name = "arrayref" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + [[package]] name = "arrayvec" version = "0.5.2" @@ -258,7 +261,7 @@ checksum = "cf2c06e30a24e8c78a3987d07f0930edf76ef35e027e7bdb063fccafdad1f60c" dependencies = [ "async-io", 
"blocking", - "cfg-if", + "cfg-if 1.0.0", "event-listener", "futures-lite", "libc", @@ -298,9 +301,9 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.22.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723" +checksum = "0f2f8a4a203be3325981310ab243a28e6e4ea55b6519bffce05d41ab60e09ad8" dependencies = [ "async-std", "async-trait", @@ -340,9 +343,9 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.57" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ "proc-macro2", "quote", @@ -393,7 +396,7 @@ checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" dependencies = [ "addr2line", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide", "object 0.27.1", @@ -424,12 +427,6 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" -[[package]] -name = "base64ct" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" - [[package]] name = "beef" version = "0.5.1" @@ -443,15 +440,15 @@ dependencies = [ name = "beefy-gadget" version = "4.0.0-dev" dependencies = [ - "array-bytes", "async-trait", "beefy-primitives", "fnv", "futures", "futures-timer", + "hex", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -459,7 +456,6 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", - "sc-network-common", "sc-network-gossip", 
"sc-network-test", "sc-utils", @@ -495,7 +491,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-rpc", "sc-utils", "serde", @@ -511,46 +507,52 @@ dependencies = [ name = "beefy-merkle-tree" version = "4.0.0-dev" dependencies = [ - "array-bytes", "beefy-primitives", "env_logger", + "hex", + "hex-literal", "log", "sp-api", - "sp-runtime", + "tiny-keccak", ] [[package]] name = "beefy-primitives" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "hex", + "hex-literal", "parity-scale-codec", "scale-info", - "serde", "sp-api", "sp-application-crypto", "sp-core", - "sp-io", "sp-keystore", - "sp-mmr-primitives", "sp-runtime", "sp-std", ] +[[package]] +name = "bimap" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" + [[package]] name = "bincode" -version = "1.3.3" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" dependencies = [ + "byteorder", "serde", ] [[package]] name = "bindgen" -version = "0.60.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -585,13 +587,23 @@ dependencies = [ [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "b94ba84325db59637ffc528bbe8c7f86c02c57cff5c0e2b9b00f9a851f42f309" dependencies = [ "digest 0.10.3", ] +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq", +] + [[package]] name = "blake2b_simd" version = "1.0.0" @@ -623,7 +635,7 @@ dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", - "cfg-if", + "cfg-if 1.0.0", "constant_time_eq", ] @@ -633,7 +645,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -645,6 +657,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ + "block-padding 0.2.1", "generic-array 0.14.4", ] @@ -666,6 +679,12 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "blocking" version = "1.0.2" @@ -748,9 +767,9 @@ dependencies = [ [[package]] name = "byteorder" -version = "1.4.3" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" @@ -817,9 +836,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.73" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" dependencies = [ "jobserver", ] @@ -834,13 +853,10 @@ dependencies = [ ] [[package]] -name = 
"cfg-expr" -version = "0.10.3" +name = "cfg-if" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aacacf4d96c24b2ad6eb8ee6df040e4f27b0d0b39a5710c30091baa830485db" -dependencies = [ - "smallvec", -] +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" @@ -848,29 +864,23 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - [[package]] name = "chacha20" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "01b72a433d0cf2aef113ba70f62634c56fddb0f244e6377185c56a7cadbd8f91" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher", - "cpufeatures", + "cpufeatures 0.2.1", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "3b84ed6d1d5f7aa9bdde921a5090e0ca4d934d250ea3b402a5fab3a994e28a2a" dependencies = [ "aead", "chacha20", @@ -884,9 +894,9 @@ name = "chain-spec-builder" version = "2.0.0" dependencies = [ "ansi_term", - "clap 4.0.11", + "clap 3.1.18", "node-cli", - "rand 0.8.5", + "rand 0.8.4", "sc-chain-spec", "sc-keystore", "sp-core", @@ -908,9 +918,9 @@ dependencies = [ [[package]] name = "cid" -version = "0.8.6" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ed9c8b2d17acb8110c46f1da5bf4a696d745e1474a16db0cd2b49cd0249bf2" +checksum = 
"a52cffa791ce5cf490ac3b2d6df970dc04f931b04e727be3c3e220e17164dfc4" dependencies = [ "core2", "multibase", @@ -930,11 +940,11 @@ dependencies = [ [[package]] name = "ckb-merkle-mountain-range" -version = "0.5.2" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ccb671c5921be8a84686e6212ca184cb1d7c51cadcdbfcbd1cc3f042f5dfb8" +checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -945,7 +955,7 @@ checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" dependencies = [ "glob", "libc", - "libloading", + "libloading 0.7.0", ] [[package]] @@ -955,39 +965,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", - "textwrap", + "textwrap 0.11.0", "unicode-width", ] [[package]] name = "clap" -version = "4.0.11" +version = "3.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" +checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" dependencies = [ "atty", "bitflags", "clap_derive", "clap_lex", - "once_cell", + "indexmap", + "lazy_static", "strsim", "termcolor", + "textwrap 0.15.0", ] [[package]] name = "clap_complete" -version = "4.0.2" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11cba7abac9b56dfe2f035098cdb3a43946f276e6db83b72c4e692343f9aab9a" +checksum = "a394f7ec0715b42a4e52b294984c27c9a61f77c8d82f7774c5198350be143f19" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", ] [[package]] name = "clap_derive" -version = "4.0.10" +version = "3.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" +checksum = 
"25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" dependencies = [ "heck", "proc-macro-error", @@ -998,21 +1010,20 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.3.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" +checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" dependencies = [ "os_str_bytes", ] [[package]] -name = "codespan-reporting" -version = "0.11.1" +name = "cmake" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +checksum = "b7b858541263efe664aead4a5209a4ae5c5d2811167d4ed4ee0944503f8d2089" dependencies = [ - "termcolor", - "unicode-width", + "cc", ] [[package]] @@ -1037,9 +1048,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" [[package]] name = "constant_time_eq" @@ -1078,10 +1089,19 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "glob", ] +[[package]] +name = "cpufeatures" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" +dependencies = [ + "libc", +] + [[package]] name = "cpufeatures" version = "0.2.1" @@ -1108,11 +1128,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b27bbd3e6c422cf6282b047bcdd51ecd9ca9f3497a3be0132ffa08e509b824b0" +checksum = "899dc8d22f7771e7f887fb8bafa0c0d3ac1dea0c7f2c0ded6e20a855a7a1e890" dependencies = [ - "cranelift-entity 0.88.0", + "cranelift-entity 0.85.0", ] [[package]] @@ -1134,16 +1154,14 @@ dependencies = [ [[package]] name = "cranelift-codegen" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872f5d4557a411b087bd731df6347c142ae1004e6467a144a7e33662e5715a01" +checksum = "8dbdc03f695cf67e7bc45da57155528274f47390b85060af8107eb304ef167c4" dependencies = [ - "arrayvec 0.7.2", - "bumpalo", - "cranelift-bforest 0.88.0", - "cranelift-codegen-meta 0.88.0", - "cranelift-codegen-shared 0.88.0", - "cranelift-entity 0.88.0", + "cranelift-bforest 0.85.0", + "cranelift-codegen-meta 0.85.0", + "cranelift-codegen-shared 0.85.0", + "cranelift-entity 0.85.0", "cranelift-isle", "gimli 0.26.1", "log", @@ -1164,11 +1182,11 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b49fdebb29c62c1fc4da1eeebd609e9d530ecde24a9876def546275f73a244" +checksum = "9ea66cbba3eb7fcb3ec9f42839a6d381bd40cf97780397e7167daf9725d4ffa0" dependencies = [ - "cranelift-codegen-shared 0.88.0", + "cranelift-codegen-shared 0.85.0", ] [[package]] @@ -1179,9 +1197,9 @@ checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" [[package]] name = "cranelift-codegen-shared" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc0c091e2db055d4d7f6b7cec2d2ead286bcfaea3357c6a52c2a2613a8cb5ac" +checksum = "712fbebd119a476f59122b4ba51fdce893a66309b5c92bd5506bfb11a0587496" [[package]] name = "cranelift-entity" @@ -1191,9 +1209,9 @@ checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" [[package]] name = "cranelift-entity" -version = "0.88.0" +version = 
"0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "354a9597be87996c9b278655e68b8447f65dd907256855ad773864edee8d985c" +checksum = "4cb8b95859c4e14c9e860db78d596a904fdbe9261990233b62bd526346cb56cb" dependencies = [ "serde", ] @@ -1212,11 +1230,11 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd8dd3fb8b82c772f4172e87ae1677b971676fffa7c4e3398e3047e650a266b" +checksum = "c7b91b19a7d1221a73f190c0e865c12be77a84f661cac89abfd4ab5820142886" dependencies = [ - "cranelift-codegen 0.88.0", + "cranelift-codegen 0.85.0", "log", "smallvec", "target-lexicon", @@ -1224,34 +1242,34 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82527802b1f7d8da288adc28f1dc97ea52943f5871c041213f7b5035ac698a7" +checksum = "86d4f53bc86fb458e59c695c6a95ce8346e6a8377ee7ffc058e3ac08b5f94cb1" [[package]] name = "cranelift-native" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c30ba8b910f1be023af0c39109cb28a8809734942a6b3eecbf2de8993052ea5e" +checksum = "592f035d0ed41214dfeeb37abd536233536a27be6b4c2d39f380cd402f0cff4f" dependencies = [ - "cranelift-codegen 0.88.0", + "cranelift-codegen 0.85.0", "libc", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.88.0" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "776a8916d201894aca9637a20814f1e11abc62acd5cfbe0b4eb2e63922756971" +checksum = "295add6bf0b527a8bc50d02e31ff878585d2d2db53cb7e8754d6d82b84480086" dependencies = [ - "cranelift-codegen 0.88.0", - "cranelift-entity 0.88.0", - "cranelift-frontend 0.88.0", + "cranelift-codegen 0.85.0", + "cranelift-entity 0.85.0", + "cranelift-frontend 0.85.0", "itertools", "log", "smallvec", - 
"wasmparser 0.89.1", + "wasmparser 0.85.0", "wasmtime-types", ] @@ -1261,7 +1279,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -1308,7 +1326,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-utils", ] @@ -1318,7 +1336,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", ] @@ -1329,7 +1347,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-utils", "lazy_static", "memoffset", @@ -1342,7 +1360,7 @@ version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "lazy_static", ] @@ -1354,9 +1372,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.4.8" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" dependencies = [ "generic-array 0.14.4", "rand_core 0.6.2", @@ -1435,6 +1453,17 @@ dependencies = [ "cipher", ] +[[package]] +name = "cuckoofilter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +dependencies = [ + "byteorder", + "fnv", + "rand 0.7.3", +] + [[package]] name = "curve25519-dalek" version = "2.1.2" @@ -1474,50 +1503,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "darling" version = "0.13.0" @@ -1581,12 +1566,11 @@ dependencies = [ [[package]] name = "der" -version = "0.6.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" dependencies = [ "const-oid", - "zeroize", ] [[package]] @@ -1656,7 +1640,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "dirs-sys-next", ] @@ 
-1695,7 +1679,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ "byteorder", - "quick-error", + "quick-error 1.2.3", ] [[package]] @@ -1704,12 +1688,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "downcast" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" - [[package]] name = "downcast-rs" version = "1.2.0" @@ -1772,14 +1750,14 @@ checksum = "64fba5a42bd76a17cad4bfa00de168ee1cbfa06a5e8ce992ae880218c05641a9" dependencies = [ "byteorder", "dynasm", - "memmap2", + "memmap2 0.5.0", ] [[package]] name = "ecdsa" -version = "0.14.7" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85789ce7dfbd0f0624c07ef653a08bb2ebf43d3e16531361f46d36dd54334fed" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" dependencies = [ "der", "elliptic-curve", @@ -1810,20 +1788,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "ed25519-zebra" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6" -dependencies = [ - "curve25519-dalek 3.0.2", - "hashbrown 0.12.3", - "hex", - "rand_core 0.6.2", - "sha2 0.9.8", - "zeroize", -] - [[package]] name = "either" version = "1.6.1" @@ -1832,14 +1796,13 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "elliptic-curve" -version = "0.12.3" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +checksum = 
"25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.3", "ff", "generic-array 0.14.4", "group", @@ -1851,9 +1814,9 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck", "proc-macro2", @@ -2009,9 +1972,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" +checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" dependencies = [ "rand_core 0.6.2", "subtle", @@ -2033,7 +1996,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "redox_syscall", "winapi", @@ -2051,19 +2014,19 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.0", + "rand 0.8.4", "scale-info", ] [[package]] name = "fixed-hash" -version = "0.8.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.8.5", + "rand 0.8.4", "rustc-hex", "static_assertions", ] @@ -2080,22 +2043,13 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", 
"crc32fast", "libc", "libz-sys", "miniz_oxide", ] -[[package]] -name = "float-cmp" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" -dependencies = [ - "num-traits", -] - [[package]] name = "fnv" version = "1.0.7" @@ -2119,29 +2073,21 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "fragile" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" - [[package]] name = "frame-benchmarking" version = "4.0.0-dev" dependencies = [ - "array-bytes", "frame-support", "frame-system", + "hex-literal", "linregress", "log", "parity-scale-codec", - "paste", - "rusty-fork", + "paste 1.0.6", "scale-info", "serde", "sp-api", "sp-application-crypto", - "sp-core", "sp-io", "sp-keystore", "sp-runtime", @@ -2155,9 +2101,8 @@ name = "frame-benchmarking-cli" version = "4.0.0-dev" dependencies = [ "Inflector", - "array-bytes", "chrono", - "clap 4.0.11", + "clap 3.1.18", "comfy-table", "frame-benchmarking", "frame-support", @@ -2165,6 +2110,7 @@ dependencies = [ "gethostname", "handlebars", "hash-db", + "hex", "itertools", "kvdb", "lazy_static", @@ -2172,7 +2118,7 @@ dependencies = [ "log", "memory-db", "parity-scale-codec", - "rand 0.8.5", + "rand 0.8.4", "rand_pcg 0.3.1", "sc-block-builder", "sc-cli", @@ -2193,7 +2139,6 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-std", "sp-storage", "sp-trie", "tempfile", @@ -2239,13 +2184,13 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", "honggfuzz", "parity-scale-codec", - "rand 0.8.5", + "rand 0.8.4", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -2256,10 +2201,9 @@ dependencies = [ name = 
"frame-executive" version = "4.0.0-dev" dependencies = [ - "array-bytes", "frame-support", "frame-system", - "frame-try-runtime", + "hex-literal", "pallet-balances", "pallet-transaction-payment", "parity-scale-codec", @@ -2279,7 +2223,7 @@ version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "parity-scale-codec", "scale-info", "serde", @@ -2300,13 +2244,12 @@ dependencies = [ "once_cell", "parity-scale-codec", "parity-util-mem", - "paste", + "paste 1.0.6", "pretty_assertions", "scale-info", "serde", "serde_json", "smallvec", - "sp-api", "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", @@ -2317,7 +2260,6 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", - "sp-weights", "tt-call", ] @@ -2326,9 +2268,7 @@ name = "frame-support-procedural" version = "4.0.0-dev" dependencies = [ "Inflector", - "cfg-expr", "frame-support-procedural-tools", - "itertools", "proc-macro2", "quote", "syn", @@ -2415,7 +2355,6 @@ dependencies = [ "sp-runtime", "sp-std", "sp-version", - "sp-weights", "substrate-test-runtime-client", ] @@ -2447,12 +2386,23 @@ name = "frame-try-runtime" version = "0.10.0-dev" dependencies = [ "frame-support", - "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", ] +[[package]] +name = "fs-swap" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" +dependencies = [ + "lazy_static", + "libc", + "libloading 0.5.2", + "winapi", +] + [[package]] name = "fs2" version = "0.4.3" @@ -2661,7 +2611,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -2674,16 +2624,16 @@ 
version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] name = "ghash" -version = "0.4.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" +checksum = "b442c439366184de619215247d24e908912b175e824a530253845ac4c251a5c1" dependencies = [ "opaque-debug 0.3.0", "polyval", @@ -2758,9 +2708,9 @@ dependencies = [ [[package]] name = "group" -version = "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" dependencies = [ "ff", "rand_core 0.6.2", @@ -2794,16 +2744,16 @@ checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" [[package]] name = "handlebars" -version = "4.3.5" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433e4ab33f1213cdc25b5fa45c76881240cfe79284cf2b395e8b9e312a30a2fd" +checksum = "99d6a30320f094710245150395bc763ad23128d6a1ebbad7594dc4164b62c56b" dependencies = [ "log", "pest", "pest_derive", + "quick-error 2.0.0", "serde", "serde_json", - "thiserror", ] [[package]] @@ -2832,9 +2782,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" dependencies = [ "ahash", ] @@ -2860,6 +2810,18 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + [[package]] name = "hmac" version = "0.8.1" @@ -2880,15 +2842,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest 0.10.3", -] - [[package]] name = "hmac-drbg" version = "0.3.0" @@ -2902,14 +2855,13 @@ dependencies = [ [[package]] name = "honggfuzz" -version = "0.5.55" +version = "0.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848e9c511092e0daa0a35a63e8e6e475a3e8f870741448b9f6028d69b142f18e" +checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" dependencies = [ "arbitrary", "lazy_static", - "memmap2", - "rustc_version 0.4.0", + "memmap", ] [[package]] @@ -2931,7 +2883,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.4", + "itoa 1.0.1", ] [[package]] @@ -2947,9 +2899,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" @@ -2965,9 +2917,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.22" +version = 
"0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -2978,7 +2930,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.4", + "itoa 0.4.8", "pin-project-lite 0.2.6", "socket2", "tokio", @@ -3031,9 +2983,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "2.0.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065c008e570a43c00de6aed9714035e5ea6a498c255323db9091722af6ee67dd" +checksum = "ae8f4a3c3d4c89351ca83e120c1c00b27df945d38e05695668c9d4b4f7bc52f3" dependencies = [ "async-io", "core-foundation", @@ -3058,9 +3010,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.4.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" dependencies = [ "serde", ] @@ -3093,7 +3045,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -3105,6 +3057,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "io-lifetimes" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec58677acfea8a15352d42fc87d11d63596ade9239e0a7c9352914417515dbe6" + [[package]] name = "io-lifetimes" version = "0.7.2" @@ -3152,9 +3110,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "jobserver" @@ -3229,8 +3187,8 @@ dependencies = [ "hyper", "jsonrpsee-types", "lazy_static", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.0", + "rand 0.8.4", "rustc-hash", "serde", "serde_json", @@ -3320,14 +3278,14 @@ dependencies = [ [[package]] name = "k256" -version = "0.11.5" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3636d281d46c3b64182eb3a0a42b7b483191a2ecc3f05301fa67403f7c9bc949" +checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sha2 0.10.2", + "sec1", ] [[package]] @@ -3359,6 +3317,7 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", + "hex-literal", "log", "node-primitives", "pallet-alliance", @@ -3374,6 +3333,7 @@ dependencies = [ "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", + "pallet-contracts-rpc-runtime-api", "pallet-conviction-voting", "pallet-democracy", "pallet-dex", @@ -3381,7 +3341,6 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", - "pallet-fast-unstake", "pallet-gilt", "pallet-grandpa", "pallet-identity", @@ -3453,9 +3412,9 @@ dependencies = [ [[package]] name = "kvdb" -version = "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585089ceadba0197ffe9af6740ab350b325e3c1f5fccfbc3522e0250c750409b" +checksum = "a301d8ecb7989d4a6e2c57a49baca77d353bdbf879909debe3f375fe25d61f86" dependencies = [ "parity-util-mem", "smallvec", @@ -3463,26 +3422,28 @@ dependencies = [ [[package]] name = "kvdb-memorydb" -version = "0.12.0" +version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d109c87bfb7759edd2a49b2649c1afe25af785d930ad6a38479b4dc70dd873" +checksum = "ece7e668abd21387aeb6628130a6f4c802787f014fa46bc83221448322250357" dependencies = [ "kvdb", "parity-util-mem", - "parking_lot 0.12.1", + "parking_lot 0.12.0", ] [[package]] name = "kvdb-rocksdb" -version = "0.16.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c076cc2cdbac89b9910c853a36c957d3862a779f31c2661174222cefb49ee597" +checksum = "ca7fbdfd71cd663dceb0faf3367a99f8cf724514933e9867cec4995b6027cbc1" dependencies = [ + "fs-swap", "kvdb", "log", "num_cpus", + "owning_ref", "parity-util-mem", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "regex", "rocksdb", "smallvec", @@ -3524,13 +3485,23 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "libloading" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" +dependencies = [ + "cc", + "winapi", +] + [[package]] name = "libloading" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "winapi", ] @@ -3542,9 +3513,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.49.0" +version = "0.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec878fda12ebec479186b3914ebc48ff180fa4c51847e11a1a68bf65249e02c1" +checksum = "81327106887e42d004fbdab1fef93675be2e2e07c1b95fce45e2cc813485611d" dependencies = [ "bytes", "futures", @@ -3552,8 +3523,12 @@ dependencies = [ "getrandom 0.2.3", "instant", "lazy_static", + "libp2p-autonat", "libp2p-core", + "libp2p-deflate", "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-mdns", 
@@ -3561,24 +3536,49 @@ dependencies = [ "libp2p-mplex", "libp2p-noise", "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-relay", + "libp2p-rendezvous", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", "libp2p-tcp", + "libp2p-uds", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multiaddr", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pin-project", + "rand 0.7.3", "smallvec", ] +[[package]] +name = "libp2p-autonat" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4decc51f3573653a9f4ecacb31b1b922dd20c25a6322bb15318ec04287ec46f9" +dependencies = [ + "async-trait", + "futures", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-request-response", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.8.4", +] + [[package]] name = "libp2p-core" -version = "0.37.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799676bb0807c788065e57551c6527d461ad572162b0519d1958946ff9e0539d" +checksum = "fbf9b94cefab7599b2d3dff2f93bee218c6621d68590b23ede4485813cbcece6" dependencies = [ "asn1_der", "bs58", @@ -3589,15 +3589,17 @@ dependencies = [ "futures-timer", "instant", "lazy_static", + "libsecp256k1", "log", "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pin-project", "prost", "prost-build", - "rand 0.8.5", + "rand 0.8.4", + "ring", "rw-stream-sink", "sha2 0.10.2", "smallvec", @@ -3607,26 +3609,83 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-deflate" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0183dc2a3da1fbbf85e5b6cf51217f55b14f5daea0c455a9536eef646bfec71" +dependencies = [ + "flate2", + "futures", + "libp2p-core", +] + [[package]] name = "libp2p-dns" -version = "0.37.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2322c9fb40d99101def6a01612ee30500c89abbbecb6297b3cd252903a4c1720" +checksum = "6cbf54723250fa5d521383be789bf60efdabe6bacfb443f87da261019a49b4b5" dependencies = [ "async-std-resolver", "futures", "libp2p-core", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "smallvec", "trust-dns-resolver", ] +[[package]] +name = "libp2p-floodsub" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98a4b6ffd53e355775d24b76f583fdda54b3284806f678499b57913adb94f231" +dependencies = [ + "cuckoofilter", + "fnv", + "futures", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74b4b888cfbeb1f5551acd3aa1366e01bf88ede26cc3c4645d0d2d004d5ca7b0" +dependencies = [ + "asynchronous-codec", + "base64", + "byteorder", + "bytes", + "fnv", + "futures", + "hex_fmt", + "instant", + "libp2p-core", + "libp2p-swarm", + "log", + "prometheus-client", + "prost", + "prost-build", + "rand 0.7.3", + "regex", + "sha2 0.10.2", + "smallvec", + "unsigned-varint", + "wasm-timer", +] + [[package]] name = "libp2p-identify" -version = "0.40.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf9a121f699e8719bda2e6e9e9b6ddafc6cff4602471d6481c1067930ccb29b" +checksum = "c50b585518f8efd06f93ac2f976bd672e17cdac794644b3117edd078e96bda06" dependencies = [ "asynchronous-codec", "futures", @@ -3645,9 +3704,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.41.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6721c200e2021f6c3fab8b6cf0272ead8912d871610ee194ebd628cecf428f22" +checksum = "740862893bb5f06ac24acc9d49bdeadc3a5e52e51818a30a25c1f3519da2c851" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -3662,7 +3721,7 @@ dependencies = [ "log", "prost", 
"prost-build", - "rand 0.8.5", + "rand 0.7.3", "sha2 0.10.2", "smallvec", "thiserror", @@ -3673,19 +3732,20 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.41.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "761704e727f7d68d58d7bc2231eafae5fc1b9814de24290f126df09d4bd37a15" +checksum = "66e5e5919509603281033fd16306c61df7a4428ce274b67af5e14b07de5cdcb2" dependencies = [ "async-io", "data-encoding", "dns-parser", "futures", "if-watch", + "lazy_static", "libp2p-core", "libp2p-swarm", "log", - "rand 0.8.5", + "rand 0.8.4", "smallvec", "socket2", "void", @@ -3693,23 +3753,25 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.10.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee31b08e78b7b8bfd1c4204a9dd8a87b4fcdf6dafc57eb51701c1c264a81cb9" +checksum = "ef8aff4a1abef42328fbb30b17c853fff9be986dc39af17ee39f9c5f755c5e0c" dependencies = [ "libp2p-core", + "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-ping", + "libp2p-relay", "libp2p-swarm", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.37.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692664acfd98652de739a8acbb0a0d670f1d67190a49be6b4395e22c37337d89" +checksum = "61fd1b20638ec209c5075dfb2e8ce6a7ea4ec3cd3ad7b77f7a477c06d53322e2" dependencies = [ "asynchronous-codec", "bytes", @@ -3717,17 +3779,17 @@ dependencies = [ "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.0", + "rand 0.7.3", "smallvec", "unsigned-varint", ] [[package]] name = "libp2p-noise" -version = "0.40.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048155686bd81fe6cb5efdef0c6290f25ad32a0a42e8f4f72625cf6a505a206f" +checksum = "762408cb5d84b49a600422d7f9a42c18012d8da6ebcd570f9a4a4290ba41fb6f" dependencies = [ "bytes", 
"curve25519-dalek 3.0.2", @@ -3737,7 +3799,7 @@ dependencies = [ "log", "prost", "prost-build", - "rand 0.8.5", + "rand 0.8.4", "sha2 0.10.2", "snow", "static_assertions", @@ -3747,25 +3809,105 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.40.1" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "100a6934ae1dbf8a693a4e7dd1d730fd60b774dafc45688ed63b554497c6c925" +dependencies = [ + "futures", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-swarm", + "log", + "rand 0.7.3", + "void", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be27bf0820a6238a4e06365b096d428271cce85a129cf16f2fe9eb1610c4df86" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core", + "log", + "prost", + "prost-build", + "unsigned-varint", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7228b9318d34689521349a86eb39a3c3a802c9efc99a0568062ffb80913e3f91" +checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" dependencies = [ + "futures", + "log", + "pin-project", + "rand 0.7.3", + "salsa20", + "sha3 0.9.1", +] + +[[package]] +name = "libp2p-relay" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4931547ee0cce03971ccc1733ff05bb0c4349fd89120a39e9861e2bbe18843c3" +dependencies = [ + "asynchronous-codec", + "bytes", + "either", "futures", "futures-timer", "instant", "libp2p-core", "libp2p-swarm", "log", - "rand 0.8.5", + "pin-project", + "prost", + "prost-build", + "prost-codec", + "rand 0.8.4", + "smallvec", + "static_assertions", + "thiserror", + "void", +] + +[[package]] +name = "libp2p-rendezvous" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9511c9672ba33284838e349623319c8cad2d18cfad243ae46c6b7e8a2982ea4e" +dependencies = [ + "asynchronous-codec", + "bimap", + "futures", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.8.4", + "sha2 0.10.2", + "thiserror", + "unsigned-varint", "void", ] [[package]] name = "libp2p-request-response" -version = "0.22.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8827af16a017b65311a410bb626205a9ad92ec0473967618425039fa5231adc1" +checksum = "508a189e2795d892c8f5c1fa1e9e0b1845d32d7b0b249dbf7b05b18811361843" dependencies = [ "async-trait", "bytes", @@ -3774,16 +3916,16 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand 0.8.5", + "rand 0.7.3", "smallvec", "unsigned-varint", ] [[package]] name = "libp2p-swarm" -version = "0.40.1" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d13df7c37807965d82930c0e4b04a659efcb6cca237373b206043db5398ecf" +checksum = "95ac5be6c2de2d1ff3f7693fda6faf8a827b1f3e808202277783fea9f527d114" dependencies = [ "either", "fnv", @@ -3793,7 +3935,7 @@ dependencies = [ "libp2p-core", "log", "pin-project", - "rand 0.8.5", + "rand 0.7.3", "smallvec", "thiserror", "void", @@ -3801,36 +3943,48 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.30.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddc4497a8b5a506013c40e8189864f9c3a00db2b25671f428ae9007f3ba32" +checksum = "9f54a64b6957249e0ce782f8abf41d97f69330d02bf229f0672d864f0650cc76" dependencies = [ - "heck", "quote", "syn", ] [[package]] name = "libp2p-tcp" -version = "0.37.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9839d96761491c6d3e238e70554b856956fca0ab60feb9de2cd08eed4473fa92" +checksum = "8a6771dc19aa3c65d6af9a8c65222bfc8fcd446630ddca487acd161fa6096f3b" dependencies = [ "async-io", 
"futures", "futures-timer", "if-watch", + "ipnet", "libc", "libp2p-core", "log", "socket2", ] +[[package]] +name = "libp2p-uds" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d125e3e5f0d58f3c6ac21815b20cf4b6a88b8db9dc26368ea821838f4161fd4d" +dependencies = [ + "async-std", + "futures", + "libp2p-core", + "log", +] + [[package]] name = "libp2p-wasm-ext" -version = "0.37.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b5b8e7a73e379e47b1b77f8a82c4721e97eca01abcd18e9cd91a23ca6ce97" +checksum = "ec894790eec3c1608f8d1a8a0bdf0dbeb79ed4de2dce964222011c2896dfa05a" dependencies = [ "futures", "js-sys", @@ -3842,16 +3996,16 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.39.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3758ae6f89b2531a24b6d9f5776bda6a626b60a57600d7185d43dfa75ca5ecc4" +checksum = "9808e57e81be76ff841c106b4c5974fb4d41a233a7bdd2afbf1687ac6def3818" dependencies = [ "either", "futures", "futures-rustls", "libp2p-core", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "quicksink", "rw-stream-sink", "soketto", @@ -3861,23 +4015,22 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.41.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30f079097a21ad017fc8139460630286f02488c8c13b26affb46623aa20d8845" +checksum = "c6dea686217a06072033dc025631932810e2f6ad784e4fafa42e27d311c7a81c" dependencies = [ "futures", "libp2p-core", - "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "thiserror", "yamux", ] [[package]] name = "librocksdb-sys" -version = "0.8.0+7.4.4" +version = "0.6.1+6.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" +checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" 
dependencies = [ "bindgen", "bzip2-sys", @@ -3901,7 +4054,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand 0.8.5", + "rand 0.8.4", "serde", "sha2 0.9.8", "typenum", @@ -3948,15 +4101,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "link-cplusplus" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.4" @@ -3982,6 +4126,12 @@ dependencies = [ "statrs", ] +[[package]] +name = "linux-raw-sys" +version = "0.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5284f00d480e1c39af34e72f8ad60b94f47007e3481cd3b731c1d67190ddc7b7" + [[package]] name = "linux-raw-sys" version = "0.0.46" @@ -3990,20 +4140,20 @@ checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" [[package]] name = "lite-json" -version = "0.2.0" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0e787ffe1153141a0f6f6d759fdf1cc34b1226e088444523812fd412a5cca2" +checksum = "0460d985423a026b4d9b828a7c6eed1bcf606f476322f3f9b507529686a61715" dependencies = [ "lite-parser", ] [[package]] name = "lite-parser" -version = "0.2.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d5f9dc37c52d889a21fd701983d02bb6a84f852c5140a6c80ef4557f7dc29e" +checksum = "0c50092e40e0ccd1bf2015a10333fde0502ff95b832b0895dc1ca0d7ac6c52f6" dependencies = [ - "paste", + "paste 0.1.18", ] [[package]] @@ -4021,7 +4171,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "value-bag", ] @@ -4048,11 +4198,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.8.1" +version = 
"0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +checksum = "32613e41de4c47ab04970c348ca7ae7382cf116625755af070b008a15516a889" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.11.2", ] [[package]] @@ -4066,9 +4216,9 @@ dependencies = [ [[package]] name = "lz4" -version = "1.24.0" +version = "1.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9e2dd86df36ce760a60f6ff6ad526f7ba1f14ba0356f8254fb6905e6494df1" +checksum = "aac20ed6991e01bf6a2e68cc73df2b389707403662a8ba89f68511fb340f724c" dependencies = [ "libc", "lz4-sys", @@ -4076,9 +4226,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.9.4" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +checksum = "dca79aa95d8b3226213ad454d328369853be3a1382d89532a854f4d69640acae" dependencies = [ "cc", "libc", @@ -4093,6 +4243,12 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + [[package]] name = "match_cfg" version = "0.1.0" @@ -4131,11 +4287,30 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memfd" -version = "0.6.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "480b5a5de855d11ff13195950bdc8b98b5e942ef47afc447f6615cdcc4e15d80" +checksum = "f6627dc657574b49d6ad27105ed671822be56e0d2547d413bfbf3e8d8fa92e7a" dependencies = [ - "rustix", + "libc", +] + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi", +] + 
+[[package]] +name = "memmap2" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6" +dependencies = [ + "libc", ] [[package]] @@ -4158,20 +4333,20 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.30.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac11bb793c28fa095b7554466f53b3a60a2cd002afdac01bcf135cbd73a269" +checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" dependencies = [ "hash-db", - "hashbrown 0.12.3", + "hashbrown 0.12.0", "parity-util-mem", ] [[package]] name = "memory_units" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] name = "merlin" @@ -4213,33 +4388,6 @@ dependencies = [ "windows-sys 0.36.1", ] -[[package]] -name = "mockall" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" -dependencies = [ - "cfg-if", - "downcast", - "fragile", - "lazy_static", - "mockall_derive", - "predicates", - "predicates-tree", -] - -[[package]] -name = "mockall_derive" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" -dependencies = [ - "cfg-if", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "more-asserts" version = "0.2.1" @@ -4288,7 +4436,7 @@ dependencies = [ "digest 0.10.3", "multihash-derive", "sha2 0.10.2", - "sha3", + "sha3 0.10.0", "unsigned-varint", ] @@ -4314,9 +4462,9 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" -version 
= "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bc41247ec209813e2fd414d6e16b9d94297dacf3cd613fa6ef09cd4d9755c10" +checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" dependencies = [ "bytes", "futures", @@ -4336,9 +4484,9 @@ dependencies = [ "matrixmultiply", "nalgebra-macros", "num-complex", - "num-rational", + "num-rational 0.4.0", "num-traits", - "rand 0.8.5", + "rand 0.8.4", "rand_distr", "simba", "typenum", @@ -4361,7 +4509,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7d66043b25d4a6cccb23619d10c19c25304b355a7dccd4a8e11423dd2382146" dependencies = [ - "rand 0.8.5", + "rand 0.8.4", ] [[package]] @@ -4378,9 +4526,9 @@ dependencies = [ [[package]] name = "netlink-packet-route" -version = "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +checksum = "733ea73609acfd7fa7ddadfb7bf709b0471668c456ad9513685af543a06342b2" dependencies = [ "anyhow", "bitflags", @@ -4398,30 +4546,29 @@ checksum = "25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" dependencies = [ "anyhow", "byteorder", - "paste", + "paste 1.0.6", "thiserror", ] [[package]] name = "netlink-proto" -version = "0.10.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +checksum = "ef8785b8141e8432aa45fceb922a7e876d7da3fad37fa7e7ec702ace3aa0826b" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror", "tokio", ] [[package]] name = "netlink-sys" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" +checksum = 
"3e4c9f9547a08241bee7b6558b9b98e1f290d187de8b7cfca2bbb4937bcaa8f8" dependencies = [ "async-io", "bytes", @@ -4432,38 +4579,40 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = "e4916f159ed8e5de0082076562152a76b7a1f64a01fd9d1e0fea002c37624faf" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", "memoffset", ] [[package]] name = "nix" -version = "0.24.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", - "cfg-if", + "cc", + "cfg-if 1.0.0", "libc", + "memoffset", ] [[package]] name = "node-bench" version = "0.9.0-dev" dependencies = [ - "array-bytes", - "clap 4.0.11", + "clap 3.1.18", "derive_more", "fs_extra", "futures", "hash-db", + "hex", "kitchensink-runtime", "kvdb", "kvdb-rocksdb", @@ -4495,16 +4644,16 @@ dependencies = [ name = "node-cli" version = "3.0.0-dev" dependencies = [ - "array-bytes", "assert_cmd", "async-std", - "clap 4.0.11", + "clap 3.1.18", "clap_complete", "criterion", "frame-benchmarking-cli", "frame-system", "frame-system-rpc-runtime-api", "futures", + "hex-literal", "jsonrpsee", "kitchensink-runtime", "log", @@ -4521,8 +4670,9 @@ dependencies = [ "pallet-transaction-payment", "parity-scale-codec", "platforms", - "rand 0.8.5", + "rand 0.8.4", "regex", + "remote-externalities", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", @@ -4539,7 +4689,6 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", - "sc-network-common", "sc-rpc", "sc-service", "sc-service-test", @@ -4570,7 +4719,6 @@ dependencies = [ "sp-trie", "substrate-build-script-utils", "substrate-frame-cli", - 
"substrate-rpc-client", "tempfile", "tokio", "try-runtime-cli", @@ -4617,7 +4765,7 @@ dependencies = [ name = "node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -4647,6 +4795,7 @@ version = "3.0.0-dev" dependencies = [ "jsonrpsee", "node-primitives", + "pallet-contracts-rpc", "pallet-dex-rpc", "pallet-mmr-rpc", "pallet-transaction-payment-rpc", @@ -4659,7 +4808,6 @@ dependencies = [ "sc-finality-grandpa-rpc", "sc-rpc", "sc-rpc-api", - "sc-rpc-spec-v2", "sc-sync-state-rpc", "sc-transaction-pool-api", "sp-api", @@ -4677,7 +4825,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "generate-bags", "kitchensink-runtime", ] @@ -4686,7 +4834,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -4735,6 +4883,7 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", + "hex-literal", "pallet-aura", "pallet-balances", "pallet-grandpa", @@ -4795,6 +4944,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -4812,17 +4967,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "normalize-line-endings" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" - [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = 
"090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ "autocfg", "num-integer", @@ -4840,12 +4989,12 @@ dependencies = [ [[package]] name = "num-format" -version = "0.4.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b862ff8df690cf089058c98b183676a7ed0f974cc08b426800093227cbff3b" +checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" dependencies = [ - "arrayvec 0.7.2", - "itoa 1.0.4", + "arrayvec 0.4.12", + "itoa 0.4.8", ] [[package]] @@ -4860,9 +5009,9 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", "num-bigint", @@ -4870,11 +5019,22 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", "libm", @@ -4911,18 +5071,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "object" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" -dependencies = [ - "crc32fast", - "hashbrown 0.12.3", - "indexmap", - "memchr", -] - [[package]] name = "once_cell" version = "1.12.0" @@ -4968,14 +5116,24 @@ 
dependencies = [ "winapi", ] +[[package]] +name = "owning_ref" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" +dependencies = [ + "stable_deref_trait", +] + [[package]] name = "pallet-alliance" version = "4.0.0-dev" dependencies = [ - "array-bytes", "frame-benchmarking", "frame-support", "frame-system", + "hex", + "hex-literal", "log", "pallet-balances", "pallet-collective", @@ -5146,7 +5304,7 @@ dependencies = [ "frame-election-provider-support", "honggfuzz", "pallet-bags-list", - "rand 0.8.5", + "rand 0.8.4", ] [[package]] @@ -5206,11 +5364,12 @@ dependencies = [ name = "pallet-beefy-mmr" version = "4.0.0-dev" dependencies = [ - "array-bytes", "beefy-merkle-tree", "beefy-primitives", "frame-support", "frame-system", + "hex", + "hex-literal", "log", "pallet-beefy", "pallet-mmr", @@ -5282,13 +5441,13 @@ dependencies = [ name = "pallet-contracts" version = "4.0.0-dev" dependencies = [ - "array-bytes", "assert_matches", "bitflags", "env_logger", "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "impl-trait-for-tuples", "log", "pallet-balances", @@ -5299,12 +5458,11 @@ dependencies = [ "pallet-utility", "parity-scale-codec", "pretty_assertions", - "rand 0.8.5", + "rand 0.8.4", "rand_pcg 0.3.1", "scale-info", "serde", "smallvec", - "sp-api", "sp-core", "sp-io", "sp-keystore", @@ -5322,9 +5480,12 @@ version = "6.0.0" dependencies = [ "bitflags", "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-rpc", "sp-runtime", "sp-std", - "sp-weights", ] [[package]] @@ -5336,6 +5497,35 @@ dependencies = [ "syn", ] +[[package]] +name = "pallet-contracts-rpc" +version = "4.0.0-dev" +dependencies = [ + "jsonrpsee", + "pallet-contracts-primitives", + "pallet-contracts-rpc-runtime-api", + "parity-scale-codec", + "serde", + "serde_json", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-rpc", + "sp-runtime", +] + +[[package]] +name = 
"pallet-contracts-rpc-runtime-api" +version = "4.0.0-dev" +dependencies = [ + "pallet-contracts-primitives", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" @@ -5362,9 +5552,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log", "pallet-balances", - "pallet-preimage", "pallet-scheduler", "parity-scale-codec", "scale-info", @@ -5429,7 +5617,7 @@ dependencies = [ "pallet-balances", "pallet-election-provider-support-benchmarking", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -5510,27 +5698,18 @@ dependencies = [ ] [[package]] -name = "pallet-fast-unstake" -version = "4.0.0-dev" +name = "pallet-example-parallel" +version = "3.0.0-dev" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", "frame-support", "frame-system", - "log", - "pallet-balances", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core", "sp-io", "sp-runtime", - "sp-staking", "sp-std", - "sp-tracing", - "substrate-test-utils", + "sp-tasks", ] [[package]] @@ -5671,12 +5850,12 @@ dependencies = [ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ - "array-bytes", "ckb-merkle-mountain-range", "env_logger", "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "itertools", "parity-scale-codec", "scale-info", @@ -5691,7 +5870,6 @@ dependencies = [ name = "pallet-mmr-rpc" version = "3.0.0" dependencies = [ - "anyhow", "jsonrpsee", "parity-scale-codec", "serde", @@ -5710,7 +5888,6 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -5787,26 +5964,10 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-runtime-interface", "sp-staking", "sp-std", ] -[[package]] -name = 
"pallet-nomination-pools-fuzzer" -version = "2.0.0" -dependencies = [ - "frame-support", - "frame-system", - "honggfuzz", - "log", - "pallet-nomination-pools", - "rand 0.8.5", - "sp-io", - "sp-runtime", - "sp-tracing", -] - [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" @@ -5891,7 +6052,6 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -5991,37 +6151,15 @@ dependencies = [ name = "pallet-remark" version = "4.0.0-dev" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-root-offences" -version = "1.0.0" -dependencies = [ - "frame-election-provider-support", + "frame-benchmarking", "frame-support", "frame-system", - "pallet-balances", - "pallet-offences", - "pallet-session", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-timestamp", "parity-scale-codec", "scale-info", + "serde", "sp-core", "sp-io", "sp-runtime", - "sp-staking", "sp-std", ] @@ -6177,7 +6315,7 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "remote-externalities", "scale-info", "serde", @@ -6287,7 +6425,6 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", - "sp-weights", ] [[package]] @@ -6298,17 +6435,16 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-runtime", - "sp-weights", ] [[package]] name = "pallet-transaction-storage" version = "4.0.0-dev" dependencies = [ - "array-bytes", "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "log", "pallet-balances", "parity-scale-codec", @@ -6366,8 +6502,6 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "pallet-collective", - "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core", @@ -6413,20 +6547,20 @@ dependencies = [ 
[[package]] name = "parity-db" -version = "0.4.2" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7511a0bec4a336b5929999d02b560d2439c993cccf98c26481484e811adc43" +checksum = "55a7901b85874402471e131de3332dde0e51f38432c69a3853627c8e25433048" dependencies = [ - "blake2", + "blake2-rfc", "crc32fast", "fs2", "hex", "libc", "log", "lz4", - "memmap2", - "parking_lot 0.12.1", - "rand 0.8.5", + "memmap2 0.2.1", + "parking_lot 0.11.2", + "rand 0.8.4", "snap", ] @@ -6465,15 +6599,15 @@ checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" [[package]] name = "parity-util-mem" -version = "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" +checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ - "cfg-if", - "hashbrown 0.12.3", + "cfg-if 1.0.0", + "hashbrown 0.12.0", "impl-trait-for-tuples", "parity-util-mem-derive", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "primitive-types", "smallvec", "winapi", @@ -6492,9 +6626,18 @@ dependencies = [ [[package]] name = "parity-wasm" -version = "0.45.0" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" +dependencies = [ + "byteorder", +] + +[[package]] +name = "parity-wasm" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" [[package]] name = "parking" @@ -6515,9 +6658,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ "lock_api", "parking_lot_core 0.9.1", @@ -6529,7 +6672,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "instant", "libc", "redox_syscall", @@ -6543,7 +6686,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "redox_syscall", "smallvec", @@ -6552,9 +6695,28 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.9" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" + +[[package]] +name = "paste-impl" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] [[package]] name = "pbkdf2" @@ -6588,19 +6750,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pest" -version = "2.4.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" dependencies = [ - 
"thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.4.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" +checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" dependencies = [ "pest", "pest_generator", @@ -6608,9 +6769,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.4.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" +checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", @@ -6621,13 +6782,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.4.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" +checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" dependencies = [ - "once_cell", + "maplit", "pest", - "sha1", + "sha-1 0.8.2", ] [[package]] @@ -6642,18 +6803,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -6678,16 +6839,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der", - "spki", -] - [[package]] name = "pkg-config" version = "0.3.19" @@ -6730,37 +6881,36 @@ dependencies = [ [[package]] name = "polling" -version = "2.4.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4609a838d88b73d8238967b60dd115cc08d38e2bbaf51ee1e4b695f89122e2" +checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ - "autocfg", - "cfg-if", + "cfg-if 0.1.10", "libc", "log", - "wepoll-ffi", + "wepoll-sys", "winapi", ] [[package]] name = "poly1305" -version = "0.7.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "9fcffab1f78ebbdf4b93b68c1ffebc24037eedf271edaca795732b24e5e4e349" dependencies = [ - "cpufeatures", + "cpufeatures 0.1.5", "opaque-debug 0.3.0", "universal-hash", ] [[package]] name = "polyval" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = "a6ba6a405ef63530d6cb12802014b22f9c5751bd17cdcddbe9e46d5c8ae83287" dependencies = [ - "cfg-if", - "cpufeatures", + "cfg-if 1.0.0", + "cpufeatures 0.1.5", "opaque-debug 0.3.0", "universal-hash", ] @@ -6778,11 +6928,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c143348f141cc87aab5b950021bac6145d0e5ae754b0591de23244cee42c9308" dependencies = [ "difflib", - "float-cmp", "itertools", - "normalize-line-endings", "predicates-core", - "regex", ] [[package]] @@ -6815,9 +6962,9 @@ dependencies = [ [[package]] name = "primitive-types" 
-version = "0.12.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cfd65aea0c5fa0bfcc7c9e7ca828c921ef778f43d325325ec84bda371bfa75a" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" dependencies = [ "fixed-hash", "impl-codec", @@ -6860,11 +7007,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + [[package]] name = "proc-macro2" -version = "1.0.46" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" +checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" dependencies = [ "unicode-ident", ] @@ -6875,7 +7028,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "fnv", "lazy_static", "memchr", @@ -6885,21 +7038,21 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.18.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" +checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" dependencies = [ "dtoa", - "itoa 1.0.4", - "parking_lot 0.12.1", + "itoa 1.0.1", + "owning_ref", "prometheus-client-derive-text-encode", ] [[package]] name = "prometheus-client-derive-text-encode" -version = "0.3.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" +checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" dependencies = [ 
"proc-macro2", "quote", @@ -6908,9 +7061,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.0" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" dependencies = [ "bytes", "prost-derive", @@ -6918,11 +7071,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.1" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" dependencies = [ "bytes", + "cfg-if 1.0.0", + "cmake", "heck", "itertools", "lazy_static", @@ -6938,9 +7093,9 @@ dependencies = [ [[package]] name = "prost-codec" -version = "0.2.0" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" +checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" dependencies = [ "asynchronous-codec", "bytes", @@ -6951,9 +7106,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" dependencies = [ "anyhow", "itertools", @@ -6964,9 +7119,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ "bytes", "prost", @@ -7007,13 +7162,19 @@ version = "1.2.3" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" + [[package]] name = "quickcheck" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "rand 0.8.5", + "rand 0.8.4", ] [[package]] @@ -7052,19 +7213,20 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc", + "rand_hc 0.2.0", "rand_pcg 0.2.1", ] [[package]] name = "rand" -version = "0.8.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha 0.3.0", "rand_core 0.6.2", + "rand_hc 0.3.0", ] [[package]] @@ -7112,7 +7274,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.5", + "rand 0.8.4", ] [[package]] @@ -7124,6 +7286,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.2", +] + [[package]] name = "rand_pcg" version = "0.2.1" @@ -7225,9 +7396,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.3.2" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779" +checksum = 
"0d37148700dbb38f994cd99a1431613057f37ed934d7e4d799b7ab758c482461" dependencies = [ "fxhash", "log", @@ -7237,9 +7408,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -7258,9 +7429,21 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" + +[[package]] +name = "region" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" +dependencies = [ + "bitflags", + "libc", + "mach", + "winapi", +] [[package]] name = "region" @@ -7280,6 +7463,7 @@ version = "0.10.0-dev" dependencies = [ "env_logger", "frame-support", + "jsonrpsee", "log", "pallet-elections-phragmen", "parity-scale-codec", @@ -7289,7 +7473,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-version", - "substrate-rpc-client", "tokio", ] @@ -7318,17 +7501,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", - "quick-error", + "quick-error 1.2.3", ] [[package]] name = "rfc6979" -version = "0.3.0" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" dependencies = [ "crypto-bigint", - "hmac 0.12.1", + "hmac 0.11.0", "zeroize", ] @@ -7354,7 
+7537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f08c8062c1fe1253064043b8fc07bfea1b9702b71b4a86c11ea3588183b12e1" dependencies = [ "bytecheck", - "hashbrown 0.12.3", + "hashbrown 0.12.0", "ptr_meta", "rend", "rkyv_derive", @@ -7374,9 +7557,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.19.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" +checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" dependencies = [ "libc", "librocksdb-sys", @@ -7384,9 +7567,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.0.0" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680" +checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" dependencies = [ "libc", "winapi", @@ -7394,16 +7577,16 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.10.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +checksum = "6f54290e54521dac3de4149d83ddf9f62a359b3cc93bcb494a794a41e6f4744b" dependencies = [ "async-global-executor", "futures", "log", "netlink-packet-route", "netlink-proto", - "nix 0.24.2", + "nix 0.22.3", "thiserror", ] @@ -7445,15 +7628,29 @@ dependencies = [ [[package]] name = "rustix" -version = "0.35.9" +version = "0.33.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938a344304321a9da4973b9ff4f9f8db9caf4597dfd9dda6a60b523340a0fff0" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes 0.5.3", + "libc", + "linux-raw-sys 0.0.42", + "winapi", +] + +[[package]] +name = "rustix" +version = "0.35.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" +checksum = "ef258c11e17f5c01979a10543a30a4e12faef6aab217a74266e747eefa3aed88" dependencies = [ "bitflags", "errno", - "io-lifetimes", + "io-lifetimes 0.7.2", "libc", - "linux-raw-sys", + "linux-raw-sys 0.0.46", "windows-sys 0.36.1", ] @@ -7496,17 +7693,6 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" -[[package]] -name = "rusty-fork" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" -dependencies = [ - "fnv", - "quick-error", - "tempfile", -] - [[package]] name = "rw-stream-sink" version = "0.3.0" @@ -7533,6 +7719,15 @@ dependencies = [ "rustc_version 0.2.3", ] +[[package]] +name = "salsa20" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0fbb5f676da676c260ba276a8f43a8dc67cf02d1438423aeb1c677a7212686" +dependencies = [ + "cipher", +] + [[package]] name = "same-file" version = "1.0.6" @@ -7568,7 +7763,7 @@ dependencies = [ "quickcheck", "rand 0.7.3", "sc-client-api", - "sc-network-common", + "sc-network", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -7589,7 +7784,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -7627,10 +7822,10 @@ name = "sc-chain-spec" version = "4.0.0-dev" dependencies = [ "impl-trait-for-tuples", - "memmap2", + "memmap2 0.5.0", "parity-scale-codec", "sc-chain-spec-derive", - "sc-network-common", + "sc-network", "sc-telemetry", "serde", "serde_json", @@ -7652,11 +7847,11 @@ dependencies = [ name = "sc-cli" version = "0.10.0-dev" dependencies = [ - "array-bytes", "chrono", - "clap 4.0.11", + "clap 3.1.18", "fdlimit", "futures", + "hex", "libp2p", "log", "names", @@ 
-7668,7 +7863,6 @@ dependencies = [ "sc-client-db", "sc-keystore", "sc-network", - "sc-network-common", "sc-service", "sc-telemetry", "sc-tracing", @@ -7697,7 +7891,7 @@ dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -7722,9 +7916,7 @@ dependencies = [ name = "sc-client-db" version = "0.10.0-dev" dependencies = [ - "criterion", "hash-db", - "kitchensink-runtime", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", @@ -7732,9 +7924,8 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "quickcheck", - "rand 0.8.5", "sc-client-api", "sc-state-db", "sp-arithmetic", @@ -7758,7 +7949,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-client-api", "sc-utils", "serde", @@ -7781,7 +7972,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -7820,10 +8011,11 @@ dependencies = [ "log", "merlin", "num-bigint", - "num-rational", + "num-rational 0.2.4", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", + "rand 0.7.3", "rand_chacha 0.2.2", "sc-block-builder", "sc-client-api", @@ -7847,7 +8039,6 @@ dependencies = [ "sp-core", "sp-inherents", "sp-io", - "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", @@ -7855,6 +8046,7 @@ dependencies = [ "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", + "tempfile", "thiserror", ] @@ -7944,7 +8136,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-client-api", "sc-consensus", "sp-api", @@ -7979,6 +8171,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-timestamp", "substrate-test-runtime-client", "thiserror", ] @@ -7997,15 +8190,15 @@ dependencies = [ 
name = "sc-executor" version = "0.10.0-dev" dependencies = [ - "array-bytes", "criterion", "env_logger", + "hex-literal", "lazy_static", "lru", "num_cpus", "parity-scale-codec", - "parking_lot 0.12.1", - "paste", + "parking_lot 0.12.0", + "paste 1.0.6", "regex", "sc-executor-common", "sc-executor-wasmi", @@ -8022,6 +8215,7 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-state-machine", + "sp-tasks", "sp-trie", "sp-version", "sp-wasm-interface", @@ -8042,6 +8236,7 @@ dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", "sp-sandbox", + "sp-serializer", "sp-wasm-interface", "thiserror", "wasm-instrument", @@ -8067,14 +8262,14 @@ dependencies = [ name = "sc-executor-wasmtime" version = "0.10.0-dev" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "log", "once_cell", "parity-scale-codec", - "parity-wasm", - "paste", - "rustix", + "parity-wasm 0.42.2", + "paste 1.0.6", + "rustix 0.35.6", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -8092,7 +8287,6 @@ name = "sc-finality-grandpa" version = "0.10.0-dev" dependencies = [ "ahash", - "array-bytes", "assert_matches", "async-trait", "dyn-clone", @@ -8100,10 +8294,11 @@ dependencies = [ "fork-tree", "futures", "futures-timer", + "hex", "log", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.0", + "rand 0.8.4", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -8130,6 +8325,7 @@ dependencies = [ "sp-tracing", "substrate-prometheus-endpoint", "substrate-test-runtime-client", + "tempfile", "thiserror", "tokio", ] @@ -8169,7 +8365,7 @@ dependencies = [ "log", "parity-util-mem", "sc-client-api", - "sc-network-common", + "sc-network", "sc-transaction-pool-api", "sp-blockchain", "sp-runtime", @@ -8179,9 +8375,9 @@ dependencies = [ name = "sc-keystore" version = "4.0.0-dev" dependencies = [ - "array-bytes", "async-trait", - "parking_lot 0.12.1", + "hex", + "parking_lot 0.12.0", "serde_json", "sp-application-crypto", "sp-core", @@ -8194,7 +8390,6 @@ 
dependencies = [ name = "sc-network" version = "0.10.0-dev" dependencies = [ - "array-bytes", "assert_matches", "async-std", "async-trait", @@ -8207,6 +8402,7 @@ dependencies = [ "fork-tree", "futures", "futures-timer", + "hex", "ip_network", "libp2p", "linked-hash-map", @@ -8214,9 +8410,10 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pin-project", "prost", + "prost-build", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8242,58 +8439,25 @@ dependencies = [ "tempfile", "thiserror", "unsigned-varint", - "zeroize", -] - -[[package]] -name = "sc-network-bitswap" -version = "0.10.0-dev" -dependencies = [ - "cid", - "futures", - "libp2p", - "log", - "prost", - "prost-build", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-network-common", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-runtime", - "substrate-test-runtime", - "substrate-test-runtime-client", - "thiserror", - "tokio", - "unsigned-varint", "void", + "zeroize", ] [[package]] name = "sc-network-common" version = "0.10.0-dev" dependencies = [ - "async-trait", "bitflags", - "bytes", "futures", - "futures-timer", "libp2p", - "linked_hash_set", "parity-scale-codec", "prost-build", "sc-consensus", "sc-peerset", - "serde", "smallvec", - "sp-blockchain", "sp-consensus", "sp-finality-grandpa", "sp-runtime", - "substrate-prometheus-endpoint", - "thiserror", ] [[package]] @@ -8308,8 +8472,7 @@ dependencies = [ "log", "lru", "quickcheck", - "sc-network-common", - "sc-peerset", + "sc-network", "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -8320,7 +8483,6 @@ dependencies = [ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ - "array-bytes", "futures", "libp2p", "log", @@ -8340,14 +8502,11 @@ dependencies = [ name = "sc-network-sync" version = "0.10.0-dev" dependencies = [ - "array-bytes", - "async-std", "fork-tree", "futures", "libp2p", "log", "lru", - "mockall", 
"parity-scale-codec", "prost", "prost-build", @@ -8357,7 +8516,6 @@ dependencies = [ "sc-consensus", "sc-network-common", "sc-peerset", - "sc-utils", "smallvec", "sp-arithmetic", "sp-blockchain", @@ -8381,7 +8539,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8401,47 +8559,27 @@ dependencies = [ "substrate-test-runtime-client", ] -[[package]] -name = "sc-network-transactions" -version = "0.10.0-dev" -dependencies = [ - "array-bytes", - "futures", - "hex", - "libp2p", - "log", - "parity-scale-codec", - "pin-project", - "sc-network-common", - "sc-peerset", - "sp-consensus", - "sp-runtime", - "substrate-prometheus-endpoint", -] - [[package]] name = "sc-offchain" version = "4.0.0-dev" dependencies = [ - "array-bytes", "bytes", "fnv", "futures", "futures-timer", + "hex", "hyper", "hyper-rustls", "lazy_static", - "libp2p", "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "rand 0.7.3", "sc-block-builder", "sc-client-api", "sc-client-db", - "sc-network-common", - "sc-peerset", + "sc-network", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", @@ -8490,12 +8628,11 @@ dependencies = [ "lazy_static", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-block-builder", "sc-chain-spec", "sc-client-api", "sc-network", - "sc-network-common", "sc-rpc-api", "sc-tracing", "sc-transaction-pool", @@ -8525,7 +8662,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-chain-spec", "sc-transaction-pool-api", "scale-info", @@ -8551,36 +8688,17 @@ dependencies = [ "tokio", ] -[[package]] -name = "sc-rpc-spec-v2" -version = "0.10.0-dev" -dependencies = [ - "futures", - "hex", - "jsonrpsee", - "parity-scale-codec", - "sc-chain-spec", - "sc-transaction-pool-api", - "serde", - "serde_json", - "sp-api", - "sp-blockchain", - "sp-core", - 
"sp-runtime", - "thiserror", - "tokio", -] - [[package]] name = "sc-runtime-test" version = "2.0.0" dependencies = [ - "paste", + "paste 1.0.6", "sp-core", "sp-io", "sp-runtime", "sp-sandbox", "sp-std", + "sp-tasks", "substrate-wasm-builder", ] @@ -8599,7 +8717,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pin-project", "rand 0.7.3", "sc-block-builder", @@ -8611,15 +8729,12 @@ dependencies = [ "sc-informant", "sc-keystore", "sc-network", - "sc-network-bitswap", "sc-network-common", "sc-network-light", "sc-network-sync", - "sc-network-transactions", "sc-offchain", "sc-rpc", "sc-rpc-server", - "sc-rpc-spec-v2", "sc-sysinfo", "sc-telemetry", "sc-tracing", @@ -8646,7 +8761,6 @@ dependencies = [ "sp-transaction-storage-proof", "sp-trie", "sp-version", - "static_init", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -8661,19 +8775,19 @@ dependencies = [ name = "sc-service-test" version = "2.0.0" dependencies = [ - "array-bytes", "fdlimit", "futures", + "hex", + "hex-literal", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-block-builder", "sc-client-api", "sc-client-db", "sc-consensus", "sc-executor", "sc-network", - "sc-network-common", "sc-service", "sc-transaction-pool-api", "sp-api", @@ -8701,7 +8815,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-client-api", "sp-core", ] @@ -8739,7 +8853,6 @@ dependencies = [ "serde_json", "sp-core", "sp-io", - "sp-runtime", "sp-std", ] @@ -8751,7 +8864,7 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pin-project", "rand 0.7.3", "serde", @@ -8772,7 +8885,7 @@ dependencies = [ "libc", "log", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "regex", "rustc-hash", "sc-client-api", @@ -8805,17 +8918,16 @@ dependencies = [ name = 
"sc-transaction-pool" version = "4.0.0-dev" dependencies = [ - "array-bytes", "assert_matches", - "async-trait", "criterion", "futures", "futures-timer", + "hex", "linked-hash-map", "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", @@ -8839,11 +8951,9 @@ dependencies = [ name = "sc-transaction-pool-api" version = "4.0.0-dev" dependencies = [ - "async-trait", "futures", "log", "serde", - "serde_json", "sp-blockchain", "sp-runtime", "thiserror", @@ -8857,7 +8967,7 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "prometheus", "tokio-test", ] @@ -8869,7 +8979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8980cafbe98a7ee7a9cc16b32ebce542c77883f512d83fbf2ddc8f6a85ea74c9" dependencies = [ "bitvec", - "cfg-if", + "cfg-if 1.0.0", "derive_more", "parity-scale-codec", "scale-info-derive", @@ -8922,12 +9032,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" - [[package]] name = "sct" version = "0.7.0" @@ -8946,14 +9050,12 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "sec1" -version = "0.3.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ - "base16ct", "der", "generic-array 0.14.4", - "pkcs8", "subtle", "zeroize", ] @@ -9082,11 +9184,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.85" 
+version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ - "itoa 1.0.4", + "itoa 1.0.1", "ryu", "serde", ] @@ -9100,6 +9202,18 @@ dependencies = [ "serde", ] +[[package]] +name = "sha-1" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + [[package]] name = "sha-1" version = "0.9.4" @@ -9107,23 +9221,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 1.0.0", "cpuid-bool", "digest 0.9.0", "opaque-debug 0.3.0", ] -[[package]] -name = "sha1" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006769ba83e921b3085caa8334186b00cf92b4cb1a6cf4632fbccc8eff5c7549" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.3", -] - [[package]] name = "sha2" version = "0.8.2" @@ -9143,8 +9246,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", + "cfg-if 1.0.0", + "cpufeatures 0.2.1", "digest 0.9.0", "opaque-debug 0.3.0", ] @@ -9155,11 +9258,23 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ - "cfg-if", - "cpufeatures", + "cfg-if 1.0.0", + "cpufeatures 0.2.1", "digest 0.10.3", ] +[[package]] +name = "sha3" +version = "0.9.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", +] + [[package]] name = "sha3" version = "0.10.0" @@ -9206,11 +9321,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.6.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" +checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" dependencies = [ - "digest 0.10.3", + "digest 0.9.0", "rand_core 0.6.2", ] @@ -9223,7 +9338,7 @@ dependencies = [ "approx", "num-complex", "num-traits", - "paste", + "paste 1.0.6", ] [[package]] @@ -9289,8 +9404,8 @@ dependencies = [ "futures", "httparse", "log", - "rand 0.8.5", - "sha-1", + "rand 0.8.4", + "sha-1 0.9.4", ] [[package]] @@ -9306,7 +9421,6 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-test-primitives", - "sp-trie", "sp-version", "thiserror", ] @@ -9437,7 +9551,7 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "sp-api", "sp-consensus", "sp-database", @@ -9544,17 +9658,18 @@ dependencies = [ name = "sp-core" version = "6.0.0" dependencies = [ - "array-bytes", "base58", "bitflags", - "blake2", + "blake2-rfc", "byteorder", "criterion", "dyn-clonable", - "ed25519-zebra", + "ed25519-dalek", "futures", "hash-db", "hash256-std-hasher", + "hex", + "hex-literal", "impl-serde", "lazy_static", "libsecp256k1", @@ -9562,7 +9677,8 @@ dependencies = [ "merlin", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parity-util-mem", + "parking_lot 0.12.0", "primitive-types", "rand 0.7.3", "regex", @@ -9596,7 +9712,7 @@ dependencies = [ "byteorder", "digest 0.10.3", "sha2 0.10.2", - "sha3", + "sha3 0.10.0", "sp-std", "twox-hash", ] @@ -9616,7 +9732,7 @@ name = "sp-database" version = 
"4.0.0-dev" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.0", ] [[package]] @@ -9679,7 +9795,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "secp256k1", "sp-core", "sp-externalities", @@ -9712,7 +9828,7 @@ dependencies = [ "futures", "merlin", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "rand 0.7.3", "rand_chacha 0.2.2", "schnorrkel", @@ -9734,17 +9850,15 @@ dependencies = [ name = "sp-mmr-primitives" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "hex-literal", "log", "parity-scale-codec", - "scale-info", "serde", "sp-api", "sp-core", "sp-debug-derive", "sp-runtime", "sp-std", - "thiserror", ] [[package]] @@ -9766,10 +9880,10 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "honggfuzz", "parity-scale-codec", - "rand 0.8.5", + "rand 0.8.4", "scale-info", "sp-npos-elections", "sp-runtime", @@ -9813,7 +9927,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "paste", + "paste 1.0.6", "rand 0.7.3", "scale-info", "serde", @@ -9826,7 +9940,6 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", - "sp-weights", "substrate-test-runtime-client", "zstd", ] @@ -9954,13 +10067,13 @@ dependencies = [ name = "sp-state-machine" version = "0.12.0" dependencies = [ - "array-bytes", "assert_matches", "hash-db", + "hex-literal", "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pretty_assertions", "rand 0.7.3", "smallvec", @@ -9972,7 +10085,6 @@ dependencies = [ "sp-trie", "thiserror", "tracing", - "trie-db", "trie-root", ] @@ -9992,6 +10104,19 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-tasks" +version = "4.0.0-dev" +dependencies = [ + "log", + "parity-scale-codec", + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime-interface", + "sp-std", +] + [[package]] name = "sp-test-primitives" 
version = "2.0.0" @@ -10057,23 +10182,16 @@ dependencies = [ name = "sp-trie" version = "6.0.0" dependencies = [ - "ahash", - "array-bytes", "criterion", "hash-db", - "hashbrown 0.12.3", - "lazy_static", - "lru", + "hex-literal", "memory-db", - "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.1", "scale-info", "sp-core", "sp-runtime", "sp-std", "thiserror", - "tracing", "trie-bench", "trie-db", "trie-root", @@ -10086,7 +10204,7 @@ version = "5.0.0" dependencies = [ "impl-serde", "parity-scale-codec", - "parity-wasm", + "parity-wasm 0.42.2", "scale-info", "serde", "sp-core-hashing-proc-macro", @@ -10119,42 +10237,17 @@ dependencies = [ "wasmtime", ] -[[package]] -name = "sp-weights" -version = "4.0.0" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "serde", - "smallvec", - "sp-arithmetic", - "sp-core", - "sp-debug-derive", - "sp-std", -] - [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "ss58-registry" -version = "1.34.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a9821878e1f13aba383aa40a86fb1b33c7265774ec91e32563cb1dd1577496" +checksum = "ceb8b72a924ccfe7882d0e26144c114503760a4d1248bb5cd06c8ab2d55404cc" dependencies = [ "Inflector", "num-format", @@ -10177,34 +10270,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "static_init" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" -dependencies = [ - "bitflags", - "cfg_aliases", - "libc", - "parking_lot 0.11.2", - "parking_lot_core 0.8.5", - "static_init_macro", - "winapi", -] - -[[package]] -name = "static_init_macro" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" -dependencies = [ - "cfg_aliases", - "memchr", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "statrs" version = "0.15.0" @@ -10215,7 +10280,7 @@ dependencies = [ "lazy_static", "nalgebra", "num-traits", - "rand 0.8.5", + "rand 0.8.4", ] [[package]] @@ -10250,7 +10315,7 @@ dependencies = [ name = "subkey" version = "2.0.2" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "sc-cli", ] @@ -10278,7 +10343,7 @@ dependencies = [ name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 4.0.11", + "clap 3.1.18", "frame-support", "frame-system", "sc-cli", @@ -10298,8 +10363,6 @@ dependencies = [ "sc-rpc-api", "scale-info", "serde", - "sp-core", - "sp-runtime", "sp-storage", "tokio", ] @@ -10341,20 +10404,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "substrate-rpc-client" -version = "0.10.0-dev" -dependencies = [ - "async-trait", - "jsonrpsee", - "log", - "sc-rpc-api", - "serde", - "sp-core", - "sp-runtime", - "tokio", -] - [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" @@ -10380,9 +10429,9 @@ dependencies = [ name = "substrate-test-client" version = "2.0.1" dependencies = [ - "array-bytes", "async-trait", "futures", + "hex", "parity-scale-codec", "sc-client-api", "sc-client-db", @@ -10407,7 +10456,7 @@ version = "2.0.0" dependencies = [ "beefy-merkle-tree", "beefy-primitives", - "cfg-if", + "cfg-if 1.0.0", "frame-support", "frame-system", "frame-system-rpc-runtime-api", @@ -10473,7 +10522,7 @@ version = "2.0.0" dependencies = [ "futures", "parity-scale-codec", - "parking_lot 0.12.1", + 
"parking_lot 0.12.0", "sc-transaction-pool", "sc-transaction-pool-api", "sp-blockchain", @@ -10525,7 +10574,7 @@ dependencies = [ "tempfile", "toml", "walkdir", - "wasm-opt", + "wasm-gc-api", ] [[package]] @@ -10596,7 +10645,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "fastrand", "libc", "redox_syscall", @@ -10622,20 +10671,26 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "textwrap" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" + [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -10668,9 +10723,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.1+5.3.0-patched" +version = "0.4.2+5.2.1-patched.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "931e876f91fed0827f863a2d153897790da0b24d882c721a79cb3beb0b903261" +checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" dependencies = [ "cc", "fs_extra", @@ -10753,7 +10808,7 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "pin-project-lite 0.2.6", "signal-hook-registry", "socket2", @@ 
-10843,7 +10898,7 @@ version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "log", "pin-project-lite 0.2.6", "tracing-attributes", @@ -10933,9 +10988,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.32.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dae77b1daad50cd3ed94c506d2dab27e2e47f7b5153a6d4b1992bb3f6028cb" +checksum = "57ecec5d10427b35e9ae374b059dccc0801d02d832617c04c78afc7a8c5c4a34" dependencies = [ "criterion", "hash-db", @@ -10949,12 +11004,12 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.24.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004e1e8f92535694b4cb1444dc5a8073ecf0815e3357f729638b9f8fc4062908" +checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" dependencies = [ "hash-db", - "hashbrown 0.12.3", + "hashbrown 0.12.0", "log", "rustc-hex", "smallvec", @@ -10981,12 +11036,12 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.22.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" dependencies = [ "async-trait", - "cfg-if", + "cfg-if 1.0.0", "data-encoding", "enum-as-inner", "futures-channel", @@ -10995,30 +11050,30 @@ dependencies = [ "idna", "ipnet", "lazy_static", - "rand 0.8.5", + "log", + "rand 0.8.4", "smallvec", "thiserror", "tinyvec", - "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.22.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" +checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "futures-util", "ipconfig", "lazy_static", + "log", "lru-cache", - "parking_lot 0.12.1", + "parking_lot 0.12.0", "resolv-conf", "smallvec", "thiserror", - "tracing", "trust-dns-proto", ] @@ -11032,8 +11087,8 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" name = "try-runtime-cli" version = "0.10.0-dev" dependencies = [ - "clap 4.0.11", - "frame-try-runtime", + "clap 3.1.18", + "jsonrpsee", "log", "parity-scale-codec", "remote-externalities", @@ -11049,9 +11104,6 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-version", - "sp-weights", - "substrate-rpc-client", - "tokio", "zstd", ] @@ -11083,9 +11135,9 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "digest 0.10.3", - "rand 0.8.5", + "rand 0.8.4", "static_assertions", ] @@ -11160,9 +11212,9 @@ checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "universal-hash" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array 0.14.4", "subtle", @@ -11298,7 +11350,7 @@ version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "wasm-bindgen-macro", ] @@ -11323,7 +11375,7 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -11359,53 +11411,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" [[package]] -name = "wasm-instrument" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa1dafb3e60065305741e83db35c6c2584bb3725b692b5b66148a38d72ace6cd" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "wasm-opt" -version = "0.110.2" +name = "wasm-gc-api" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b68e8037b4daf711393f4be2056246d12d975651b14d581520ad5d1f19219cec" +checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" dependencies = [ - "anyhow", - "libc", - "strum", - "strum_macros", - "tempfile", - "thiserror", - "wasm-opt-cxx-sys", - "wasm-opt-sys", -] - -[[package]] -name = "wasm-opt-cxx-sys" -version = "0.110.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91adbad477e97bba3fbd21dd7bfb594e7ad5ceb9169ab1c93ab9cb0ada636b6f" -dependencies = [ - "anyhow", - "cxx", - "cxx-build", - "wasm-opt-sys", + "log", + "parity-wasm 0.32.0", + "rustc-demangle", ] [[package]] -name = "wasm-opt-sys" -version = "0.110.2" +name = "wasm-instrument" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec4fa5a322a4e6ac22fd141f498d56afbdbf9df5debeac32380d2dcaa3e06941" +checksum = "962e5b0401bbb6c887f54e69b8c496ea36f704df65db73e81fd5ff8dc3e63a9f" dependencies = [ - "anyhow", - "cc", - "cxx", - "cxx-build", - "regex", + "parity-wasm 0.42.2", ] [[package]] @@ -11429,7 +11451,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f727a39e7161f7438ddb8eafe571b67c576a8c2fb459f666d9053b5bba4afdea" dependencies = [ - 
"cfg-if", + "cfg-if 1.0.0", "indexmap", "js-sys", "loupe", @@ -11531,7 +11553,7 @@ dependencies = [ "enumset", "lazy_static", "loupe", - "memmap2", + "memmap2 0.5.0", "more-asserts", "rustc-demangle", "serde", @@ -11549,11 +11571,11 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccd7fdc60e252a795c849b3f78a81a134783051407e7e279c10b7019139ef8dc" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "enum-iterator", "enumset", "leb128", - "libloading", + "libloading 0.7.0", "loupe", "object 0.28.3", "rkyv", @@ -11574,12 +11596,12 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcff0cd2c01a8de6009fd863b14ea883132a468a24f2d2ee59dc34453d3a31b5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "enum-iterator", "enumset", "leb128", "loupe", - "region", + "region 3.0.0", "rkyv", "wasmer-compiler", "wasmer-engine", @@ -11621,14 +11643,14 @@ checksum = "afdc46158517c2769f9938bc222a7d41b3bb330824196279d8aa2d667cd40641" dependencies = [ "backtrace", "cc", - "cfg-if", + "cfg-if 1.0.0", "enum-iterator", "indexmap", "libc", "loupe", "memoffset", "more-asserts", - "region", + "region 3.0.0", "rkyv", "serde", "thiserror", @@ -11638,35 +11660,28 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.13.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc13b3c219ca9aafeec59150d80d89851df02e0061bc357b4d66fc55a8d38787" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" dependencies = [ - "parity-wasm", + "downcast-rs", + "errno", + "libc", + "libm", + "memory_units", + "num-rational 0.2.4", + "num-traits", + "parity-wasm 0.42.2", "wasmi-validation", - "wasmi_core", ] [[package]] name = "wasmi-validation" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" -dependencies = [ - "parity-wasm", -] - 
-[[package]] -name = "wasmi_core" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a088e8c4c59c6f2b9eae169bf86328adccc477c00b56d3661e3e9fb397b184" +checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" dependencies = [ - "downcast-rs", - "libm", - "memory_units", - "num-rational", - "num-traits", + "parity-wasm 0.42.2", ] [[package]] @@ -11677,55 +11692,49 @@ checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" [[package]] name = "wasmparser" -version = "0.89.1" +version = "0.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5d3e08b13876f96dd55608d03cd4883a0545884932d5adf11925876c96daef" +checksum = "570460c58b21e9150d2df0eaaedbb7816c34bcec009ae0dcc976e40ba81463e7" dependencies = [ "indexmap", ] [[package]] name = "wasmtime" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a10dc9784d8c3a33c970e3939180424955f08af2e7f20368ec02685a0e8f065" +checksum = "c842f9c8e190fe01300fc8d715e9368c775670fb9856247c67abffdb5236d6db" dependencies = [ "anyhow", + "backtrace", "bincode", - "cfg-if", + "cfg-if 1.0.0", "indexmap", + "lazy_static", "libc", "log", - "object 0.29.0", + "object 0.28.3", "once_cell", - "paste", + "paste 1.0.6", "psm", "rayon", + "region 2.2.0", "serde", "target-lexicon", - "wasmparser 0.89.1", + "wasmparser 0.85.0", "wasmtime-cache", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", "wasmtime-runtime", - "windows-sys 0.36.1", -] - -[[package]] -name = "wasmtime-asm-macros" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4dbdc6daf68528cad1275ac91e3f51848ce9824385facc94c759f529decdf8" -dependencies = [ - "cfg-if", + "winapi", ] [[package]] name = "wasmtime-cache" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9f507f3fa1ee1b2f9a83644e2514242b1dfe580782c0eb042f1ef70255bc4ffe" +checksum = "cce2aa752e864a33eef2a6629edc59554e75f0bc1719431dac5e49eed516af69" dependencies = [ "anyhow", "base64", @@ -11733,125 +11742,129 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix", + "rustix 0.33.7", "serde", "sha2 0.9.8", "toml", - "windows-sys 0.36.1", + "winapi", "zstd", ] [[package]] name = "wasmtime-cranelift" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f03cf79d982fc68e94ba0bea6a300a3b94621c4eb9705eece0a4f06b235a3b5" +checksum = "922361eb8c03cea8909bc922471202f6c6bc2f0c682fac2fe473740441c86b3b" dependencies = [ "anyhow", - "cranelift-codegen 0.88.0", - "cranelift-entity 0.88.0", - "cranelift-frontend 0.88.0", + "cranelift-codegen 0.85.0", + "cranelift-entity 0.85.0", + "cranelift-frontend 0.85.0", "cranelift-native", "cranelift-wasm", "gimli 0.26.1", "log", - "object 0.29.0", + "more-asserts", + "object 0.28.3", "target-lexicon", "thiserror", - "wasmparser 0.89.1", + "wasmparser 0.85.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c587c62e91c5499df62012b87b88890d0eb470b2ffecc5964e9da967b70c77c" +checksum = "e602f1120fc40a3f016f1f69d08c86cfeff7b867bed1462901953e6871f85167" dependencies = [ "anyhow", - "cranelift-entity 0.88.0", + "cranelift-entity 0.85.0", "gimli 0.26.1", "indexmap", "log", - "object 0.29.0", + "more-asserts", + "object 0.28.3", "serde", "target-lexicon", "thiserror", - "wasmparser 0.89.1", + "wasmparser 0.85.0", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "047839b5dabeae5424a078c19b8cc897e5943a7fadc69e3d888b9c9a897666b3" +checksum = "49af1445759a8e797a92f27dd0983c155615648263052e0b80d69e7d223896b7" 
dependencies = [ "addr2line", "anyhow", "bincode", - "cfg-if", + "cfg-if 1.0.0", "cpp_demangle", "gimli 0.26.1", "log", - "object 0.29.0", + "object 0.28.3", + "region 2.2.0", "rustc-demangle", - "rustix", + "rustix 0.33.7", "serde", "target-lexicon", "thiserror", "wasmtime-environ", "wasmtime-jit-debug", "wasmtime-runtime", - "windows-sys 0.36.1", + "winapi", ] [[package]] name = "wasmtime-jit-debug" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b299569abf6f99b7b8e020afaf84a700e8636c6a42e242069267322cd5818235" +checksum = "e6d5dd480cc6dc0a401653e45b79796a3317f8228990d84bc2271bdaf0810071" dependencies = [ - "object 0.29.0", - "once_cell", - "rustix", + "lazy_static", + "object 0.28.3", + "rustix 0.33.7", ] [[package]] name = "wasmtime-runtime" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae79e0515160bd5abee5df50a16c4eb8db9f71b530fc988ae1d9ce34dcb8dd01" +checksum = "e875bcd02d1ecfc7d099dd58354d55d73467652eb2b103ff470fe3aecb7d0381" dependencies = [ "anyhow", + "backtrace", "cc", - "cfg-if", + "cfg-if 1.0.0", "indexmap", "libc", "log", "mach", "memfd", "memoffset", - "paste", - "rand 0.8.5", - "rustix", + "more-asserts", + "rand 0.8.4", + "region 2.2.0", + "rustix 0.33.7", "thiserror", - "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", - "windows-sys 0.36.1", + "winapi", ] [[package]] name = "wasmtime-types" -version = "1.0.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790cf43ee8e2d5dad1780af30f00d7a972b74725fb1e4f90c28d62733819b185" +checksum = "8fd63a19ba61ac7448add4dc1fecb8d78304812af2a52dad04b89f887791b156" dependencies = [ - "cranelift-entity 0.88.0", + "cranelift-entity 0.85.0", "serde", "thiserror", - "wasmparser 0.89.1", + "wasmparser 0.85.0", ] [[package]] @@ -11902,10 +11915,10 @@ dependencies = [ ] [[package]] -name = "wepoll-ffi" -version = 
"0.1.2" +name = "wepoll-sys" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" +checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] @@ -11959,15 +11972,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "aac7fef12f4b59cd0a29339406cc9203ab44e440ddff6b3f5a41455349fa9cf3" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", + "windows_aarch64_msvc 0.29.0", + "windows_i686_gnu 0.29.0", + "windows_i686_msvc 0.29.0", + "windows_x86_64_gnu 0.29.0", + "windows_x86_64_msvc 0.29.0", ] [[package]] @@ -11998,15 +12011,15 @@ dependencies = [ [[package]] name = "windows_aarch64_msvc" -version = "0.32.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +checksum = "c3d027175d00b01e0cbeb97d6ab6ebe03b12330a35786cbaca5252b1c4bf5d9b" [[package]] name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" [[package]] name = "windows_aarch64_msvc" @@ -12016,15 +12029,15 @@ checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" -version = "0.32.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +checksum = "8793f59f7b8e8b01eda1a652b2697d87b93097198ae85f823b969ca5b89bba58" [[package]] name = "windows_i686_gnu" -version = "0.34.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" [[package]] name = "windows_i686_gnu" @@ -12034,15 +12047,15 @@ checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" -version = "0.32.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +checksum = "8602f6c418b67024be2996c512f5f995de3ba417f4c75af68401ab8756796ae4" [[package]] name = "windows_i686_msvc" -version = "0.34.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" [[package]] name = "windows_i686_msvc" @@ -12052,15 +12065,15 @@ checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" -version = "0.32.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +checksum = "f3d615f419543e0bd7d2b3323af0d86ff19cbc4f816e6453f36a2c2ce889c354" [[package]] name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" [[package]] name = "windows_x86_64_gnu" @@ -12070,15 
+12083,15 @@ checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" -version = "0.32.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +checksum = "11d95421d9ed3672c280884da53201a5c46b7b2765ca6faf34b0d71cf34a3561" [[package]] name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" [[package]] name = "windows_x86_64_msvc" @@ -12106,9 +12119,9 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "1.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" +checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" dependencies = [ "curve25519-dalek 3.0.2", "rand_core 0.5.1", @@ -12117,23 +12130,23 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "0c0608f53c1dc0bad505d03a34bbd49fbf2ad7b51eb036123e896365532745a1" dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.0", + "rand 0.8.4", "static_assertions", ] [[package]] name = "zeroize" -version = "1.5.7" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 
393943d3c6550..6d78db8ec2860 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,6 @@ members = [ "client/keystore", "client/network", "client/network-gossip", - "client/network/bitswap", "client/network/common", "client/network/light", "client/network/sync", @@ -55,7 +54,6 @@ members = [ "client/rpc", "client/rpc-api", "client/rpc-servers", - "client/rpc-spec-v2", "client/service", "client/service/test", "client/state-db", @@ -86,13 +84,13 @@ members = [ "frame/child-bounties", "frame/collective", "frame/contracts", - "frame/contracts/primitives", + "frame/contracts/rpc", + "frame/contracts/rpc/runtime-api", "frame/conviction-voting", "frame/democracy", "frame/dex", "frame/dex/rpc", "frame/dex/rpc/runtime-api", - "frame/fast-unstake", "frame/try-runtime", "frame/election-provider-multi-phase", "frame/election-provider-support", @@ -101,6 +99,7 @@ members = [ "frame/election-provider-support/solution-type/fuzzer", "frame/examples/basic", "frame/examples/offchain-worker", + "frame/examples/parallel", "frame/executive", "frame/gilt", "frame/grandpa", @@ -118,7 +117,6 @@ members = [ "frame/preimage", "frame/proxy", "frame/nomination-pools", - "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", "frame/nomination-pools/test-staking", "frame/nomination-pools/runtime-api", @@ -137,7 +135,6 @@ members = [ "frame/staking/reward-fn", "frame/state-trie-migration", "frame/sudo", - "frame/root-offences", "frame/support", "frame/support/procedural", "frame/support/procedural/tools", @@ -207,6 +204,7 @@ members = [ "primitives/state-machine", "primitives/std", "primitives/storage", + "primitives/tasks", "primitives/test-primitives", "primitives/timestamp", "primitives/tracing", @@ -216,7 +214,6 @@ members = [ "primitives/version", "primitives/version/proc-macro", "primitives/wasm-interface", - "primitives/weights", "test-utils/client", "test-utils/derive", "test-utils/runtime", @@ -234,7 +231,6 @@ members = [ "utils/frame/rpc/system", 
"utils/frame/generate-bags", "utils/frame/generate-bags/node-runtime", - "utils/frame/rpc/client", "utils/prometheus", "utils/wasm-builder", ] @@ -256,6 +252,7 @@ members = [ # This list is ordered alphabetically. [profile.dev.package] blake2 = { opt-level = 3 } +blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } @@ -264,7 +261,7 @@ crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } -ed25519-zebra = { opt-level = 3 } +ed25519-dalek = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/README.md b/README.md index fd0f62432200c..e535f2947e3d1 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ This pallet allows to: - exchange the LP token back to assets - swap 2 assets if there is a pool created - query for an exchange price via a new RPC endpoint -## Getting Started ## RPC usage diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 0f6fd9450aeee..8defb870fa1b0 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -114,7 +114,7 @@ local node template. ### Multi-Node Local Testnet If you want to see the multi-node consensus algorithm in action, refer to our -[Simulate a network tutorial](https://docs.substrate.io/tutorials/get-started/simulate-network/). +[Start a Private Network tutorial](https://docs.substrate.io/tutorials/v3/private-network). ## Template Structure @@ -129,7 +129,7 @@ Substrate-based blockchain nodes expose a number of capabilities: - Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the nodes in the network to communicate with one another. 
- Consensus: Blockchains must have a way to come to - [consensus](https://docs.substrate.io/main-docs/fundamentals/consensus/) on the state of the + [consensus](https://docs.substrate.io/v3/advanced/consensus) on the state of the network. Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). @@ -138,20 +138,22 @@ Substrate-based blockchain nodes expose a number of capabilities: There are several files in the `node` directory - take special note of the following: - [`chain_spec.rs`](./node/src/chain_spec.rs): A - [chain specification](https://docs.substrate.io/main-docs/build/chain-spec/) is a + [chain specification](https://docs.substrate.io/v3/runtime/chain-specs) is a source code file that defines a Substrate chain's initial (genesis) state. Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain. Take note of the `development_config` and `testnet_genesis` functions, which are used to define the genesis state for the local development chain configuration. These functions identify some - [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) + [well-known accounts](https://docs.substrate.io/v3/tools/subkey#well-known-keys) and use them to configure the blockchain's initial state. - [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of the libraries that this file imports and the names of the functions it invokes. 
In particular, there are references to consensus-related topics, such as the - [block finalization and forks](https://docs.substrate.io/main-docs/fundamentals/consensus/#finalization-and-forks) - and other [consensus mechanisms](https://docs.substrate.io/main-docs/fundamentals/consensus/#default-consensus-models) - such as Aura for block authoring and GRANDPA for finality. + [longest chain rule](https://docs.substrate.io/v3/advanced/consensus#longest-chain-rule), + the [Aura](https://docs.substrate.io/v3/advanced/consensus#aura) block authoring + mechanism and the + [GRANDPA](https://docs.substrate.io/v3/advanced/consensus#grandpa) finality + gadget. After the node has been [built](#build), refer to the embedded documentation to learn more about the capabilities and configuration parameters that it exposes: @@ -163,15 +165,16 @@ capabilities and configuration parameters that it exposes: ### Runtime In Substrate, the terms -"runtime" and "state transition function" +"[runtime](https://docs.substrate.io/v3/getting-started/glossary#runtime)" and +"[state transition function](https://docs.substrate.io/v3/getting-started/glossary#state-transition-function-stf)" are analogous - they refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define. The Substrate project in this repository uses -[FRAME](https://docs.substrate.io/main-docs/fundamentals/runtime-intro/#frame) to construct a +the [FRAME](https://docs.substrate.io/v3/runtime/frame) framework to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". 
At the heart of FRAME is a helpful -[macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to +[macro language](https://docs.substrate.io/v3/runtime/macros) that makes it easy to create pallets and flexibly compose them to create blockchains that can address -[a variety of needs](https://substrate.io/ecosystem/projects/). +[a variety of needs](https://www.substrate.io/substrate-users/). Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: @@ -181,7 +184,8 @@ the following: - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core - FRAME Support [system](https://docs.substrate.io/reference/frame-pallets/#system-pallets) library. + [FRAME Support](https://docs.substrate.io/v3/runtime/frame#support-crate) + library. ### Pallets @@ -192,12 +196,12 @@ template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs A FRAME pallet is compromised of a number of blockchain primitives: - Storage: FRAME defines a rich set of powerful - [storage abstractions](https://docs.substrate.io/main-docs/build/runtime-storage/) that makes + [storage abstractions](https://docs.substrate.io/v3/runtime/storage) that makes it easy to use Substrate's efficient key-value database to manage the evolving state of a blockchain. - Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) from outside of the runtime in order to update its state. -- Events: Substrate uses [events and errors](https://docs.substrate.io/main-docs/build/events-errors/) +- Events: Substrate uses [events and errors](https://docs.substrate.io/v3/runtime/events-and-errors) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. 
- Config: The `Config` configuration interface is used to define the types and parameters upon diff --git a/bin/node-template/docker-compose.yml b/bin/node-template/docker-compose.yml index bc1922f47d963..cfc4437bbae41 100644 --- a/bin/node-template/docker-compose.yml +++ b/bin/node-template/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.2" services: dev: container_name: node-template - image: paritytech/ci-linux:production + image: paritytech/ci-linux:974ba3ac-20201006 working_dir: /var/www/node-template ports: - "9944:9944" diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md index 2755966e3ae0f..ea133ca847af7 100644 --- a/bin/node-template/docs/rust-setup.md +++ b/bin/node-template/docs/rust-setup.md @@ -3,7 +3,7 @@ title: Installation --- This guide is for reference only, please check the latest information on getting starting with Substrate -[here](https://docs.substrate.io/main-docs/install/). +[here](https://docs.substrate.io/v3/getting-started/installation/). This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first @@ -73,11 +73,11 @@ brew install openssl ### Windows -**_PLEASE NOTE:_** Native Windows development of Substrate is _not_ very well supported! It is _highly_ +**_PLEASE NOTE:_** Native development of Substrate is _not_ very well supported! It is _highly_ recommend to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). Please refer to the separate -[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/). +[guide for native Windows development](https://docs.substrate.io/v3/getting-started/windows-users/). 
## Rust developer environment diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index d94955f722605..eeba198da8212 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } sp-core = { version = "6.0.0", path = "../../../primitives/core" } @@ -67,12 +67,7 @@ substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build [features] default = [] -# Dependencies that are only required if runtime benchmarking should be build. -runtime-benchmarks = [ - "node-template-runtime/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-benchmarking-cli/runtime-benchmarks", -] +runtime-benchmarks = ["node-template-runtime/runtime-benchmarks"] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli/try-runtime"] +try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli"] diff --git a/bin/node-template/node/src/benchmarking.rs b/bin/node-template/node/src/benchmarking.rs index 90fe06edf04b8..f0e32104cd3ee 100644 --- a/bin/node-template/node/src/benchmarking.rs +++ b/bin/node-template/node/src/benchmarking.rs @@ -119,7 +119,7 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { pub fn create_benchmark_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, - call: runtime::RuntimeCall, + call: runtime::Call, nonce: u32, ) -> runtime::UncheckedExtrinsic { let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index dd610477ac469..4ab4d34210c98 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -2,7 +2,7 @@ use sc_cli::RunCmd; #[derive(Debug, clap::Parser)] pub struct Cli { - #[command(subcommand)] + #[clap(subcommand)] pub subcommand: Option, #[clap(flatten)] @@ -12,7 +12,7 @@ pub struct Cli { #[derive(Debug, clap::Subcommand)] pub enum Subcommand { /// Key management cli utilities - #[command(subcommand)] + #[clap(subcommand)] Key(sc_cli::KeySubcommand), /// Build a chain specification. @@ -37,7 +37,7 @@ pub enum Subcommand { Revert(sc_cli::RevertCmd), /// Sub-commands concerned with benchmarking. - #[command(subcommand)] + #[clap(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), /// Try some command against runtime state. diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 6d293b7b85fcc..142f0b40c325e 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -127,12 +127,6 @@ pub fn run() -> sc_cli::Result<()> { let PartialComponents { client, .. 
} = service::new_partial(&config)?; cmd.run(client) }, - #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => Err( - "Storage benchmarking can be enabled with `--features runtime-benchmarks`." - .into(), - ), - #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => { let PartialComponents { client, backend, .. } = service::new_partial(&config)?; @@ -145,13 +139,7 @@ pub fn run() -> sc_cli::Result<()> { let PartialComponents { client, .. } = service::new_partial(&config)?; let ext_builder = RemarkBuilder::new(client.clone()); - cmd.run( - config, - client, - inherent_benchmark_data()?, - Vec::new(), - &ext_builder, - ) + cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) }, BenchmarkCmd::Extrinsic(cmd) => { let PartialComponents { client, .. } = service::new_partial(&config)?; @@ -165,7 +153,7 @@ pub fn run() -> sc_cli::Result<()> { )), ]); - cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory) + cmd.run(client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index ee8464688c79c..ffb2440caa0ed 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -1,7 +1,7 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::BlockBackend; +use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; pub use sc_executor::NativeElseWasmExecutor; use sc_finality_grandpa::SharedVoterState; @@ -113,7 +113,7 @@ pub fn new_partial( let slot_duration = sc_consensus_aura::slot_duration(&*client)?; let import_queue = - sc_consensus_aura::import_queue::(ImportQueueParams { + sc_consensus_aura::import_queue::(ImportQueueParams { block_import: grandpa_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), @@ -126,13 +126,15 @@ pub fn new_partial( slot_duration, ); - Ok((slot, timestamp)) + Ok((timestamp, slot)) }, spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new( + client.executor().clone(), + ), registry: config.prometheus_registry(), check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), - compatibility_mode: Default::default(), })?; Ok(sc_service::PartialComponents { @@ -192,7 +194,7 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, network_starter) = + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -239,7 +241,6 @@ pub fn new_full(mut config: Configuration) -> Result rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, - tx_handler_controller, config, telemetry: telemetry.as_mut(), })?; @@ -253,9 +254,12 @@ pub fn new_full(mut config: Configuration) -> Result telemetry.as_ref().map(|x| x.handle()), ); + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - let aura = 
sc_consensus_aura::start_aura::( + let aura = sc_consensus_aura::start_aura::( StartAuraParams { slot_duration, client, @@ -271,17 +275,17 @@ pub fn new_full(mut config: Configuration) -> Result slot_duration, ); - Ok((slot, timestamp)) + Ok((timestamp, slot)) }, force_authoring, backoff_authoring_blocks, keystore: keystore_container.sync_keystore(), + can_author_with, sync_oracle: network.clone(), justification_sync_link: network.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), - compatibility_mode: Default::default(), }, )?; diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 3cfcef9d902ce..6f7a4b1d25841 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -30,7 +30,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../ default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 28d36ac2c6321..067c7ce2575a0 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -2,7 +2,7 @@ /// Edit this file to define custom logic or remove it if it is not needed. /// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// +/// pub use pallet::*; #[cfg(test)] @@ -19,33 +19,33 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - /// Configure the pallet by specifying the parameters and types on which it depends. 
#[pallet::config] pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + // The pallet's runtime storage items. - // https://docs.substrate.io/main-docs/build/runtime-storage/ + // https://docs.substrate.io/v3/runtime/storage #[pallet::storage] #[pallet::getter(fn something)] // Learn more about declaring storage items: - // https://docs.substrate.io/main-docs/build/runtime-storage/#declaring-storage-items + // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items pub type Something = StorageValue<_, u32>; // Pallets use events to inform users when important changes are made. - // https://docs.substrate.io/main-docs/build/events-errors/ + // https://docs.substrate.io/v3/runtime/events-and-errors #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Event documentation should end with an array that provides descriptive names for event /// parameters. [something, who] - SomethingStored { something: u32, who: T::AccountId }, + SomethingStored(u32, T::AccountId), } // Errors inform users that something went wrong. @@ -64,24 +64,24 @@ pub mod pallet { impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. - #[pallet::weight(10_000 + T::DbWeight::get().writes(1).ref_time())] + #[pallet::weight(10_000 + T::DbWeight::get().writes(1))] pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. 
- // https://docs.substrate.io/main-docs/build/origins/ + // https://docs.substrate.io/v3/runtime/origins let who = ensure_signed(origin)?; // Update storage. >::put(something); // Emit an event. - Self::deposit_event(Event::SomethingStored { something, who }); + Self::deposit_event(Event::SomethingStored(something, who)); // Return a successful DispatchResultWithPostInfo Ok(()) } /// An example dispatchable that may throw a custom error. - #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1).ref_time())] + #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))] pub fn cause_error(origin: OriginFor) -> DispatchResult { let _who = ensure_signed(origin)?; diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 989681fa59a00..8721fe6c78851 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -17,8 +17,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system, - TemplateModule: pallet_template, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + TemplateModule: pallet_template::{Pallet, Call, Storage, Event}, } ); @@ -27,8 +27,8 @@ impl system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -36,7 +36,7 @@ impl system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -50,7 +50,7 @@ impl system::Config for Test { } impl pallet_template::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } // Build genesis storage according to the 
mock runtime. diff --git a/bin/node-template/pallets/template/src/tests.rs b/bin/node-template/pallets/template/src/tests.rs index 527aec8ed00c0..2205658601721 100644 --- a/bin/node-template/pallets/template/src/tests.rs +++ b/bin/node-template/pallets/template/src/tests.rs @@ -5,7 +5,7 @@ use frame_support::{assert_noop, assert_ok}; fn it_works_for_default_value() { new_test_ext().execute_with(|| { // Dispatch a signed extrinsic. - assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); + assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); // Read pallet storage and assert an expected result. assert_eq!(TemplateModule::something(), Some(42)); }); @@ -15,9 +15,6 @@ fn it_works_for_default_value() { fn correct_error_for_none_value() { new_test_ext().execute_with(|| { // Ensure the expected error is thrown when no value is present. - assert_noop!( - TemplateModule::cause_error(RuntimeOrigin::signed(1)), - Error::::NoneValue - ); + assert_noop!(TemplateModule::cause_error(Origin::signed(1)), Error::::NoneValue); }); } diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 139264657f89d..734ed089aa4bd 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -46,6 +46,7 @@ pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-fe # Used for runtime benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +hex-literal = { version = "0.3.4", optional = true } # Local Dependencies pallet-template = { version = "4.0.0-dev", default-features = false, path = "../pallets/template" } @@ -56,16 +57,12 @@ substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-bu [features] default = ["std"] std = [ - 
"frame-try-runtime?/std", - "frame-system-benchmarking?/std", - "frame-benchmarking?/std", "codec/std", "scale-info/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", - "frame-try-runtime/std", "pallet-aura/std", "pallet-balances/std", "pallet-grandpa/std", @@ -90,8 +87,9 @@ std = [ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", + "frame-system-benchmarking", "frame-system/runtime-benchmarks", + "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-template/runtime-benchmarks", @@ -99,10 +97,9 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-try-runtime/try-runtime", "frame-executive/try-runtime", + "frame-try-runtime", "frame-system/try-runtime", - "frame-support/try-runtime", "pallet-aura/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 1d0e18d31bf80..88fc86db02ef9 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -14,9 +14,7 @@ use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify, - }, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; @@ -40,7 +38,7 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; -use pallet_transaction_payment::{ConstFeeMultiplier, CurrencyAdapter, Multiplier}; +use 
pallet_transaction_payment::CurrencyAdapter; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; @@ -139,11 +137,8 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::with_sensible_defaults( - (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), - NORMAL_DISPATCH_RATIO, - ); + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; @@ -161,7 +156,7 @@ impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; + type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. @@ -175,9 +170,9 @@ impl frame_system::Config for Runtime { /// The header type. type Header = generic::Header; /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; + type Event = Event; /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// The weight of database operations that the runtime can invoke. 
@@ -212,7 +207,8 @@ impl pallet_aura::Config for Runtime { } impl pallet_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; + type Call = Call; type KeyOwnerProofSystem = (); @@ -248,34 +244,30 @@ impl pallet_balances::Config for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU128; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; } -parameter_types! { - pub FeeMultiplier: Multiplier = Multiplier::one(); -} - impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = ConstFeeMultiplier; + type FeeMultiplierUpdate = (); } impl pallet_sudo::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; } /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } // Create the runtime by composing the FRAME pallets that were previously configured. @@ -316,12 +308,10 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); - /// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, @@ -477,23 +467,6 @@ impl_runtime_apis! { } } - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - } - #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( @@ -524,8 +497,18 @@ impl_runtime_apis! { impl frame_system_benchmarking::Config for Runtime {} impl baseline::Config for Runtime {} - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -545,51 +528,8 @@ impl_runtime_apis! { (weight, BlockWeights::get().max_block) } - fn execute_block( - block: Block, - state_root_check: bool, - select: frame_try_runtime::TryStateSelect - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. 
- Executive::try_execute_block(block, state_root_check, select).expect("execute-block failed") + fn execute_block_no_check(block: Block) -> Weight { + Executive::execute_block_no_check(block) } } } - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::traits::WhitelistedStorageKeys; - use sp_core::hexdisplay::HexDisplay; - use std::collections::HashSet; - - #[test] - fn check_whitelist() { - let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() - .iter() - .map(|e| HexDisplay::from(&e.key).to_string()) - .collect(); - - // Block Number - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac") - ); - // Total Issuance - assert!( - whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80") - ); - // Execution Phase - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a") - ); - // Event Count - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850") - ); - // System Events - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7") - ); - } -} diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 5fb4c418e8ae8..4d090c71a72e9 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -5,14 +5,11 @@ authors = ["Parity Technologies "] description = "Substrate node integration benchmarks." 
edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -array-bytes = "4.1" -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } log = "0.4.17" node-primitives = { version = "2.0.0", path = "../primitives" } node-testing = { version = "3.0.0-dev", path = "../testing" } @@ -21,10 +18,10 @@ sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } serde = "1.0.136" -serde_json = "1.0.85" +serde_json = "1.0.79" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } -kvdb = "0.12.0" -kvdb-rocksdb = "0.16.0" +kvdb = "0.11.0" +kvdb-rocksdb = "0.15.1" sp-trie = { version = "6.0.0", path = "../../../primitives/trie" } sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } @@ -35,10 +32,11 @@ sp-tracing = { version = "5.0.0", path = "../../../primitives/tracing" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" +hex = "0.4.0" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } -parity-db = "0.4.2" +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-db = { version = "0.3" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } futures = { version = "0.3.21", features = ["thread-pool"] 
} diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index b6ad3ecd80068..3b3060a888349 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -132,7 +132,7 @@ pub fn run_benchmark(benchmark: Box, mode: Mode) -> Be durations.push(duration.as_nanos()); } - durations.sort(); + durations.sort_unstable(); let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 76bd3a3240c51..2b26ed9089a51 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut}; +use sp_trie::{trie_types::TrieDBMutV1, TrieMut}; use crate::simple_trie::SimpleTrie; @@ -37,16 +37,13 @@ pub fn generate_trie( let (db, overlay) = { let mut overlay = HashMap::new(); overlay.insert( - array_bytes::hex2bytes( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ) - .expect("null key is valid"), + hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + .expect("null key is valid"), Some(vec![0]), ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = - TrieDBMutBuilderV1::::new(&mut trie, &mut root).build(); + let mut trie_db = TrieDBMutV1::::new(&mut trie, &mut root); for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 28a322834271c..47f630eb68700 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -34,7 +34,7 @@ use std::borrow::Cow; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; -use sc_client_api::{backend::Backend, 
HeaderBackend}; +use sc_client_api::backend::Backend; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; @@ -127,15 +127,10 @@ impl core::Benchmark for ImportBenchmark { context.import_block(self.block.clone()); let elapsed = start.elapsed(); - let hash = context - .client - .expect_block_hash_from_id(&BlockId::number(1)) - .expect("Block 1 was imported; qed"); - // Sanity checks. context .client - .state_at(hash) + .state_at(&BlockId::number(1)) .expect("state_at failed for block#1") .inspect_state(|| { match self.block_type { diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 8a5d99640eb1b..d97c7af26535b 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -43,18 +43,18 @@ use crate::{ }; #[derive(Debug, Parser)] -#[command(name = "node-bench", about = "Node integration benchmarks")] +#[clap(name = "node-bench", about = "Node integration benchmarks")] struct Opt { /// Show list of all available benchmarks. /// /// Will output ("name", "path"). Benchmarks can then be filtered by path. - #[arg(short, long)] + #[clap(short, long)] list: bool, /// Machine readable json output. /// /// This also suppresses all regular output (except to stderr) - #[arg(short, long)] + #[clap(short, long)] json: bool, /// Filter benchmarks. @@ -63,7 +63,7 @@ struct Opt { filter: Option, /// Number of transactions for block import with `custom` size. - #[arg(long)] + #[clap(long)] transactions: Option, /// Mode @@ -72,7 +72,7 @@ struct Opt { /// /// "profile" mode adds pauses between measurable runs, /// so that actual interval can be selected in the profiler of choice. 
- #[arg(short, long, default_value = "regular")] + #[clap(short, long, default_value = "regular")] mode: BenchmarkMode, } diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index eb3bb1d3fccd7..22c5980fd6524 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use kvdb::{DBKeyValue, DBTransaction, KeyValueDB}; +use kvdb::{DBTransaction, KeyValueDB}; use kvdb_rocksdb::{Database, DatabaseConfig}; use std::{io, path::PathBuf, sync::Arc}; @@ -38,7 +38,7 @@ impl KeyValueDB for ParityDbWrapper { } /// Get a value by partial key. Only works for flushed data. - fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> io::Result>> { + fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> Option> { unimplemented!() } @@ -56,7 +56,7 @@ impl KeyValueDB for ParityDbWrapper { } /// Iterate over flushed data for a given column. - fn iter<'a>(&'a self, _col: u32) -> Box> + 'a> { + fn iter<'a>(&'a self, _col: u32) -> Box, Box<[u8]>)> + 'a> { unimplemented!() } @@ -65,7 +65,12 @@ impl KeyValueDB for ParityDbWrapper { &'a self, _col: u32, _prefix: &'a [u8], - ) -> Box> + 'a> { + ) -> Box, Box<[u8]>)> + 'a> { + unimplemented!() + } + + /// Attempt to replace this database with a new one located at the given path. 
+ fn restore(&self, _new_db: &str) -> io::Result<()> { unimplemented!() } } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index de49a6fe7b6da..d508dc712e1c3 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -23,7 +23,7 @@ use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _}; +use sp_trie::{trie_types::TrieDBMutV1, TrieMut as _}; use std::{borrow::Cow, collections::HashMap, sync::Arc}; use node_primitives::Hash; @@ -180,7 +180,7 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackendBuilder::new(storage, self.root).build(); + let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_backend .storage(&warmup_key[..]) @@ -286,7 +286,8 @@ impl core::Benchmark for TrieWriteBenchmark { let mut overlay = HashMap::new(); let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay }; - let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&mut trie, &mut new_root).build(); + let mut trie_db_mut = TrieDBMutV1::from_existing(&mut trie, &mut new_root) + .expect("Failed to create TrieDBMut"); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_db_mut diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index ba372fb10d5ee..1e1670d133359 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -34,12 +34,12 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -array-bytes = "4.1" -clap = { version = "4.0.9", features = ["derive"], optional = true } +clap = { version = "3.1.18", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.136", features = ["derive"] } 
jsonrpsee = { version = "0.15.1", features = ["server"] } futures = "0.3.21" +hex-literal = "0.3.4" log = "0.4.17" rand = "0.8" @@ -66,7 +66,6 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } -sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-uncles = { version = "0.10.0-dev", path = "../../../client/consensus/uncles" } @@ -99,7 +98,7 @@ sc-cli = { version = "0.10.0-dev", optional = true, path = "../../../client/cli" frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } -serde_json = "1.0.85" +serde_json = "1.0.79" [target.'cfg(any(target_arch="x86_64", target_arch="aarch64"))'.dependencies] node-executor = { version = "3.0.0-dev", path = "../executor", features = ["wasmtime"] } @@ -126,19 +125,19 @@ tempfile = "3.1.0" assert_cmd = "2.0.2" nix = "0.23" serde_json = "1.0" -regex = "1.6.0" +regex = "1.5.5" platforms = "2.0" async-std = { version = "1.11.0", features = ["attributes"] } soketto = "0.7.1" criterion = { version = "0.3.5", features = ["async_tokio"] } tokio = { version = "1.17.0", features = ["macros", "time", "parking_lot"] } wait-timeout = "0.2" -substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } +remote-externalities = { path = "../../../utils/frame/remote-externalities" } 
pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } [build-dependencies] -clap = { version = "4.0.9", optional = true } -clap_complete = { version = "4.0.2", optional = true } +clap = { version = "3.1.18", optional = true } +clap_complete = { version = "3.0", optional = true } node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } @@ -150,6 +149,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } [features] default = ["cli"] cli = [ + "node-executor/wasmi-errno", "node-inspect", "sc-cli", "frame-benchmarking-cli", @@ -160,13 +160,10 @@ cli = [ "substrate-build-script-utils", "try-runtime-cli", ] -runtime-benchmarks = [ - "kitchensink-runtime/runtime-benchmarks", - "frame-benchmarking-cli/runtime-benchmarks" -] +runtime-benchmarks = ["kitchensink-runtime/runtime-benchmarks", "frame-benchmarking-cli"] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli/try-runtime"] +try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli"] [[bench]] name = "transaction_pool" diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 4fcebb123d9e3..6d269ccaac271 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -28,7 +28,7 @@ use sc_consensus::{ }; use sc_service::{ config::{ - BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, Configuration, Role, @@ -72,9 +72,10 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - trie_cache_maximum_size: Some(64 * 1024 * 1024), + state_cache_size: 67108864, + state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::KeepAll, + keep_blocks: KeepBlocks::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, @@ -122,7 +123,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { kitchensink_runtime::UncheckedExtrinsic { signature: None, - function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), + function: kitchensink_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), } .into() } diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index a8839642ddc26..580a10d6a6678 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ 
b/bin/node/cli/benches/transaction_pool.rs @@ -26,7 +26,7 @@ use node_primitives::AccountId; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ - BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, TransactionPoolOptions, WasmExecutionMethod, }, BasePath, Configuration, Role, @@ -66,9 +66,10 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - trie_cache_maximum_size: Some(64 * 1024 * 1024), + state_cache_size: 67108864, + state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::KeepAll, + keep_blocks: KeepBlocks::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index e8142b297f1b2..6a3d13dda6a00 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -25,7 +25,7 @@ fn main() { mod cli { include!("src/cli.rs"); - use clap::{CommandFactory, ValueEnum}; + use clap::{ArgEnum, CommandFactory}; use clap_complete::{generate_to, Shell}; use std::{env, fs, path::Path}; use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 8d74f2bde0f44..77e2f73dd6e18 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -19,6 +19,7 @@ //! Substrate chain configurations. 
use grandpa_primitives::AuthorityId as GrandpaId; +use hex_literal::hex; use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig, @@ -97,83 +98,84 @@ fn staging_testnet_config_genesis() -> GenesisConfig { )> = vec![ ( // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy - array_bytes::hex_n_into_unchecked("9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"), + hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq - array_bytes::hex_n_into_unchecked("781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"), + hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC - array_bytes::hex2array_unchecked("9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332") + hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"] .unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106") + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] .unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106") + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] .unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106") + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] .unchecked_into(), ), ( // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 - array_bytes::hex_n_into_unchecked("68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"), + 
hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF - array_bytes::hex_n_into_unchecked("c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"), + hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE - array_bytes::hex2array_unchecked("7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f") + hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"] .unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e") + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] .unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e") + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] .unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e") + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] .unchecked_into(), ), ( // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp - array_bytes::hex_n_into_unchecked("547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"), + hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 - array_bytes::hex_n_into_unchecked("9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"), + hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d - array_bytes::hex2array_unchecked("5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440") + 
hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"] .unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a") + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] .unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a") + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] .unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a") + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] .unchecked_into(), ), ( // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 - array_bytes::hex_n_into_unchecked("f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"), + hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn - array_bytes::hex_n_into_unchecked("66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"), + hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 - array_bytes::hex2array_unchecked("3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef") + hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"] .unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378") + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] .unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378") + 
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] .unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378") + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] .unchecked_into(), ), ]; // generated with secret: subkey inspect "$secret"/fir - let root_key: AccountId = array_bytes::hex_n_into_unchecked( + let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo - "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809", - ); + "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" + ] + .into(); let endowed_accounts: Vec = vec![root_key.clone()]; diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index bb7f8a4c60aa9..5b2977599bab0 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -20,7 +20,7 @@ #[derive(Debug, clap::Parser)] pub struct Cli { /// Possible subcommand with parameters. - #[command(subcommand)] + #[clap(subcommand)] pub subcommand: Option, #[allow(missing_docs)] @@ -34,7 +34,7 @@ pub struct Cli { /// /// The results are then printed out in the logs, and also sent as part of /// telemetry, if telemetry is enabled. - #[arg(long)] + #[clap(long)] pub no_hardware_benchmarks: bool, } @@ -42,7 +42,7 @@ pub struct Cli { #[derive(Debug, clap::Subcommand)] pub enum Subcommand { /// The custom inspect subcommmand for decoding blocks and extrinsics. - #[command( + #[clap( name = "inspect", about = "Decode given block or extrinsic using current native runtime." )] @@ -50,7 +50,7 @@ pub enum Subcommand { /// Sub-commands concerned with benchmarking. /// The pallet benchmarking moved to the `pallet` sub-command. - #[command(subcommand)] + #[clap(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), /// Try some command against runtime state. 
@@ -62,7 +62,7 @@ pub enum Subcommand { TryRuntime, /// Key management cli utilities - #[command(subcommand)] + #[clap(subcommand)] Key(sc_cli::KeySubcommand), /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 108d7743843b6..85e5415dbe139 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -115,56 +115,35 @@ pub fn run() -> Result<()> { cmd.run::(config) }, BenchmarkCmd::Block(cmd) => { - // ensure that we keep the task manager alive - let partial = new_partial(&config)?; - cmd.run(partial.client) + let PartialComponents { client, .. } = new_partial(&config)?; + cmd.run(client) }, - #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => Err( - "Storage benchmarking can be enabled with `--features runtime-benchmarks`." - .into(), - ), - #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => { - // ensure that we keep the task manager alive - let partial = new_partial(&config)?; - let db = partial.backend.expose_db(); - let storage = partial.backend.expose_storage(); + let PartialComponents { client, backend, .. } = new_partial(&config)?; + let db = backend.expose_db(); + let storage = backend.expose_storage(); - cmd.run(config, partial.client, db, storage) + cmd.run(config, client, db, storage) }, BenchmarkCmd::Overhead(cmd) => { - // ensure that we keep the task manager alive - let partial = new_partial(&config)?; - let ext_builder = RemarkBuilder::new(partial.client.clone()); - - cmd.run( - config, - partial.client, - inherent_benchmark_data()?, - Vec::new(), - &ext_builder, - ) + let PartialComponents { client, .. 
} = new_partial(&config)?; + let ext_builder = RemarkBuilder::new(client.clone()); + + cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) }, BenchmarkCmd::Extrinsic(cmd) => { - // ensure that we keep the task manager alive - let partial = service::new_partial(&config)?; + let PartialComponents { client, .. } = service::new_partial(&config)?; // Register the *Remark* and *TKA* builders. let ext_factory = ExtrinsicFactory(vec![ - Box::new(RemarkBuilder::new(partial.client.clone())), + Box::new(RemarkBuilder::new(client.clone())), Box::new(TransferKeepAliveBuilder::new( - partial.client.clone(), + client.clone(), Sr25519Keyring::Alice.to_account_id(), ExistentialDeposit::get(), )), ]); - cmd.run( - partial.client, - inherent_benchmark_data()?, - Vec::new(), - &ext_factory, - ) + cmd.run(client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 6c29f0c08ee13..e0644f462cf20 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -26,11 +26,10 @@ use futures::prelude::*; use kitchensink_runtime::RuntimeApi; use node_executor::ExecutorDispatch; use node_primitives::Block; -use sc_client_api::BlockBackend; +use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; -use sc_network::NetworkService; -use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; +use sc_network::{Event, NetworkService}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; @@ -69,7 +68,7 @@ pub fn fetch_nonce(client: &FullClient, account: sp_core::sr25519::Pair) -> u32 pub fn create_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, - function: impl Into, + function: impl 
Into, nonce: Option, ) -> kitchensink_runtime::UncheckedExtrinsic { let function = function.into(); @@ -199,7 +198,7 @@ pub fn new_partial( let justification_import = grandpa_block_import.clone(); let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::configuration(&*client)?, + sc_consensus_babe::Config::get(&*client)?, grandpa_block_import, client.clone(), )?; @@ -223,10 +222,11 @@ pub fn new_partial( let uncles = sp_authorship::InherentDataProvider::<::Header>::check_inherents(); - Ok((slot, timestamp, uncles)) + Ok((timestamp, slot, uncles)) }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), telemetry.as_ref().map(|x| x.handle()), )?; @@ -354,7 +354,7 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, network_starter) = + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -392,7 +392,6 @@ pub fn new_full_base( transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, system_rpc_tx, - tx_handler_controller, telemetry: telemetry.as_mut(), })?; @@ -422,6 +421,9 @@ pub fn new_full_base( telemetry.as_ref().map(|x| x.handle()), ); + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + let client_clone = client.clone(); let slot_duration = babe_link.config().slot_duration(); let babe_config = sc_consensus_babe::BabeParams { @@ -454,12 +456,13 @@ pub fn new_full_base( &parent, )?; - Ok((slot, timestamp, uncles, storage_proof)) + Ok((timestamp, slot, uncles, storage_proof)) } }, force_authoring, backoff_authoring_blocks, babe_link, + can_author_with, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), @@ -566,7 +569,7 @@ mod tests { use 
codec::Encode; use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, - Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, + Address, BalancesCall, Call, UncheckedExtrinsic, }; use node_primitives::{Block, DigestItem, Signature}; use sc_client_api::BlockBackend; @@ -588,7 +591,7 @@ mod tests { RuntimeAppPublic, }; use sp_timestamp; - use std::sync::Arc; + use std::{borrow::Cow, sync::Arc}; type AccountPublic = ::Signer; @@ -678,7 +681,10 @@ mod tests { .epoch_changes() .shared_data() .epoch_data(&epoch_descriptor, |slot| { - sc_consensus_babe::Epoch::genesis(babe_link.config(), slot) + sc_consensus_babe::Epoch::genesis( + babe_link.config().genesis_config(), + slot, + ) }) .unwrap(); @@ -734,9 +740,9 @@ mod tests { let mut params = BlockImportParams::new(BlockOrigin::File, new_header); params.post_digests.push(item); params.body = Some(new_body); - params.insert_intermediate( - INTERMEDIATE_KEY, - BabeIntermediate:: { epoch_descriptor }, + params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); params.fork_choice = Some(ForkChoiceStrategy::LongestChain); @@ -755,10 +761,8 @@ mod tests { }; let signer = charlie.clone(); - let function = RuntimeCall::Balances(BalancesCall::transfer { - dest: to.into(), - value: amount, - }); + let function = + Call::Balances(BalancesCall::transfer { dest: to.into(), value: amount }); let check_non_zero_sender = frame_system::CheckNonZeroSender::new(); let check_spec_version = frame_system::CheckSpecVersion::new(); diff --git a/bin/node/cli/tests/benchmark_storage_works.rs b/bin/node/cli/tests/benchmark_storage_works.rs index 82d1c943ae7aa..30f860e48459f 100644 --- a/bin/node/cli/tests/benchmark_storage_works.rs +++ b/bin/node/cli/tests/benchmark_storage_works.rs @@ -47,7 +47,6 @@ fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus { .args(["--state-version", "1"]) .args(["--warmups", "0"]) .args(["--add", "100", "--mul", 
"1.2", "--metric", "p75"]) - .arg("--include-child-trees") .status() .unwrap() } diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 358c09779d59a..9c739c2cf2d28 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -23,7 +23,8 @@ use nix::{ sys::signal::{kill, Signal::SIGINT}, unistd::Pid, }; -use node_primitives::{Hash, Header}; +use node_primitives::Block; +use remote_externalities::rpc_api; use std::{ io::{BufRead, BufReader, Read}, ops::{Deref, DerefMut}, @@ -68,14 +69,11 @@ pub async fn wait_n_finalized_blocks( /// Wait for at least n blocks to be finalized from a specified node pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { - use substrate_rpc_client::{ws_client, ChainApi}; - let mut built_blocks = std::collections::HashSet::new(); let mut interval = tokio::time::interval(Duration::from_secs(2)); - let rpc = ws_client(url).await.unwrap(); loop { - if let Ok(block) = ChainApi::<(), Hash, Header, ()>::finalized_head(&rpc).await { + if let Ok(block) = rpc_api::get_finalized_head::(url.to_string()).await { built_blocks.insert(block); if built_blocks.len() > n { break diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 485908af518fe..25036d2d0d04c 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -47,6 +47,7 @@ sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [features] wasmtime = ["sc-executor/wasmtime"] +wasmi-errno = ["sc-executor/wasmi-errno"] stress-test = [] [[bench]] diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 850be3e3c6281..a1d31a5a966db 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -19,8 +19,8 @@ use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; use kitchensink_runtime::{ - constants::currency::*, Block, BuildStorage, CheckedExtrinsic, 
GenesisConfig, Header, - RuntimeCall, UncheckedExtrinsic, + constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, + UncheckedExtrinsic, }; use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; @@ -31,6 +31,7 @@ use sc_executor::{Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmE use sp_core::{ storage::well_known_keys, traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; @@ -111,24 +112,46 @@ fn construct_block( // execute the block to get the real header. executor - .call(ext, &runtime_code, "Core_initialize_block", &header.encode(), true) + .call:: _>( + ext, + &runtime_code, + "Core_initialize_block", + &header.encode(), + true, + None, + ) .0 .unwrap(); for i in extrinsics.iter() { executor - .call(ext, &runtime_code, "BlockBuilder_apply_extrinsic", &i.encode(), true) + .call:: _>( + ext, + &runtime_code, + "BlockBuilder_apply_extrinsic", + &i.encode(), + true, + None, + ) .0 .unwrap(); } - let header = Header::decode( - &mut &executor - .call(ext, &runtime_code, "BlockBuilder_finalize_block", &[0u8; 0], true) - .0 - .unwrap()[..], - ) - .unwrap(); + let header = match executor + .call:: _>( + ext, + &runtime_code, + "BlockBuilder_finalize_block", + &[0u8; 0], + true, + None, + ) + .0 + .unwrap() + { + NativeOrEncoded::Native(_) => unreachable!(), + NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), + }; let hash = header.blake2_256(); (Block { header, extrinsics }.encode(), hash.into()) @@ -141,11 +164,11 @@ fn test_blocks( let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: 0 }), }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { 
signed: Some((alice(), signed_extra(i, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: Call::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 1 * DOLLARS, }), @@ -195,12 +218,13 @@ fn bench_execute_block(c: &mut Criterion) { |test_ext| { for block in blocks.iter() { executor - .call( + .call:: _>( &mut test_ext.ext(), &runtime_code, "Core_execute_block", &block.0, use_native, + None, ) .0 .unwrap(); diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index fc4e138faafc2..27e848a281097 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -17,20 +17,19 @@ use codec::{Decode, Encode, Joiner}; use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo}, traits::Currency, - weights::Weight, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; -use sp_core::{storage::well_known_keys, traits::Externalities}; +use sp_core::{storage::well_known_keys, traits::Externalities, NeverNativeValue}; use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult, }; use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, - Balances, CheckedExtrinsic, Header, Runtime, RuntimeCall, RuntimeEvent, System, - TransactionPayment, UncheckedExtrinsic, + Balances, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, + UncheckedExtrinsic, }; use node_primitives::{Balance, Hash}; use node_testing::keyring::*; @@ -68,7 +67,7 @@ fn transfer_fee(extrinsic: &E) -> Balance { fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: RuntimeCall::Balances(default_transfer_call()), + function: Call::Balances(default_transfer_call()), }) } @@ -85,11 +84,11 @@ fn changes_trie_block() -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: 
RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: Call::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 69 * DOLLARS, }), @@ -112,11 +111,11 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: Call::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 69 * DOLLARS, }), @@ -128,22 +127,22 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { let block2 = construct_block( &mut t, 2, - block1.1, + block1.1.clone(), vec![ CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { signed: Some((bob(), signed_extra(0, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: Call::Balances(pallet_balances::Call::transfer { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(1, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: Call::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 15 * DOLLARS, }), @@ -167,11 +166,11 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, 
CheckedExtrinsic { signed: Some((alice(), signed_extra(nonce, 0))), - function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }), + function: Call::System(frame_system::Call::remark { remark: vec![0; size] }), }, ], (time * 1000 / SLOT_DURATION).into(), @@ -188,14 +187,25 @@ fn panic_execution_with_foreign_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; assert!(r.is_ok()); - let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) - .0 - .unwrap(); - let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); + let v = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0 + .unwrap(); + let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -209,14 +219,25 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; assert!(r.is_ok()); - let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) - .0 - .unwrap(); - let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); + let v = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0 + .unwrap(); + let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); assert_eq!(r, 
Err(InvalidTransaction::Payment.into())); } @@ -245,14 +266,26 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -286,14 +319,26 @@ fn successful_execution_with_foreign_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -311,21 +356,20 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - let transfer_weight = default_transfer_call().get_dispatch_info().weight.saturating_add( - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic, - ); + let transfer_weight = default_transfer_call().get_dispatch_info().weight; let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } .get_dispatch_info() - .weight - 
.saturating_add( - ::BlockWeights::get() - .get(DispatchClass::Mandatory) - .base_extrinsic, - ); + .weight; - executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + true, + None, + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -334,7 +378,7 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { + event: Event::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, @@ -345,7 +389,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw { + event: Event::Balances(pallet_balances::Event::Withdraw { who: alice().into(), amount: fees, }), @@ -353,7 +397,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: alice().into(), to: bob().into(), amount: 69 * DOLLARS, @@ -362,7 +406,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { + event: Event::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), amount: fees * 8 / 10, }), @@ -370,14 +414,12 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, - }), + event: Event::Treasury(pallet_treasury::Event::Deposit { value: fees * 8 / 10 }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: 
RuntimeEvent::TransactionPayment( + event: Event::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), actual_fee: fees, @@ -388,7 +430,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { + event: Event::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, }), topics: vec![], @@ -399,7 +441,15 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block2.0, + true, + None, + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!( @@ -410,7 +460,7 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { + event: Event::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, @@ -421,7 +471,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw { + event: Event::Balances(pallet_balances::Event::Withdraw { who: bob().into(), amount: fees, }), @@ -429,7 +479,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: bob().into(), to: alice().into(), amount: 5 * DOLLARS, @@ -438,7 +488,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { + event: Event::Balances(pallet_balances::Event::Deposit { who: 
pallet_treasury::Pallet::::account_id(), amount: fees * 8 / 10, }), @@ -446,14 +496,12 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, - }), + event: Event::Treasury(pallet_treasury::Event::Deposit { value: fees * 8 / 10 }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::TransactionPayment( + event: Event::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: bob().into(), actual_fee: fees, @@ -464,14 +512,14 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { + event: Event::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw { + event: Event::Balances(pallet_balances::Event::Withdraw { who: alice().into(), amount: fees, }), @@ -479,7 +527,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: alice().into(), to: bob().into(), amount: 15 * DOLLARS, @@ -488,7 +536,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { + event: Event::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), amount: fees * 8 / 10, }), @@ -496,14 +544,12 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, - }), + event: 
Event::Treasury(pallet_treasury::Event::Deposit { value: fees * 8 / 10 }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: RuntimeEvent::TransactionPayment( + event: Event::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), actual_fee: fees, @@ -514,7 +560,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { + event: Event::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, }), topics: vec![], @@ -533,7 +579,15 @@ fn full_wasm_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block1.0, false).0.unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + false, + None, + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -543,7 +597,15 @@ fn full_wasm_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block2.0, false).0.unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block2.0, + false, + None, + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!( @@ -664,27 +726,27 @@ fn deploying_wasm_contract_should_work() { vec![ CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::< - Runtime, - > { - value: 0, - gas_limit: Weight::from_ref_time(500_000_000), - storage_deposit_limit: None, - code: transfer_code, - data: Vec::new(), - salt: 
Vec::new(), - }), + function: Call::Contracts( + pallet_contracts::Call::instantiate_with_code:: { + value: 0, + gas_limit: 500_000_000, + storage_deposit_limit: None, + code: transfer_code, + data: Vec::new(), + salt: Vec::new(), + }, + ), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: RuntimeCall::Contracts(pallet_contracts::Call::call:: { + function: Call::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, - gas_limit: Weight::from_ref_time(500_000_000), + gas_limit: 500_000_000, storage_deposit_limit: None, data: vec![0x00, 0x01, 0x02, 0x03], }), @@ -695,7 +757,9 @@ fn deploying_wasm_contract_should_work() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call(&mut t, "Core_execute_block", &b.0, false).0.unwrap(); + executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None) + .0 + .unwrap(); t.execute_with(|| { // Verify that the contract does exist by querying some of its storage items @@ -714,8 +778,14 @@ fn wasm_big_block_import_fails() { set_heap_pages(&mut t.ext(), 4); - let result = - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false).0; + let result = executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + false, + None, + ) + .0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -723,9 +793,15 @@ fn wasm_big_block_import_fails() { fn native_big_block_import_succeeds() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, true) - .0 - .unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + true, + None, + ) + .0 + .unwrap(); } #[test] @@ -736,11 +812,15 @@ fn native_big_block_import_fails_on_fallback() { // block. 
set_heap_pages(&mut t.ext(), 8); - assert!( - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false,) - .0 - .is_err() - ); + assert!(executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + false, + None, + ) + .0 + .is_err()); } #[test] @@ -757,17 +837,25 @@ fn panic_execution_gives_error() { t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call( + let r = executor_call:: _>( &mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), false, + None, ) .0; assert!(r.is_ok()); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) - .0 - .unwrap(); + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + false, + None, + ) + .0 + .unwrap() + .into_encoded(); let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -797,11 +885,12 @@ fn successful_execution_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call( + let r = executor_call:: _>( &mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), false, + None, ) .0; assert!(r.is_ok()); @@ -811,9 +900,16 @@ fn successful_execution_gives_ok() { let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) - .0 - .unwrap(); + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + false, + None, + ) + .0 + .unwrap() + .into_encoded(); ApplyExtrinsicResult::decode(&mut &r[..]) .unwrap() .expect("Extrinsic could not be applied") diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 803ec78329eea..407a1e09f8efb 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -27,6 +27,7 @@ use sp_core::{ 
crypto::KeyTypeId, sr25519::Signature, traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::{ traits::{BlakeTwo256, Header as HeaderT}, @@ -98,12 +99,17 @@ pub fn executor() -> NativeElseWasmExecutor { NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8, 2) } -pub fn executor_call( +pub fn executor_call< + R: Decode + Encode + PartialEq, + NC: FnOnce() -> std::result::Result> + + std::panic::UnwindSafe, +>( t: &mut TestExternalities, method: &str, data: &[u8], use_native: bool, -) -> (Result>, bool) { + native_call: Option, +) -> (Result>, bool) { let mut t = t.ext(); let code = t.storage(sp_core::storage::well_known_keys::CODE).unwrap(); @@ -114,7 +120,7 @@ pub fn executor_call( heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), }; sp_tracing::try_init_simple(); - executor().call(&mut t, &runtime_code, method, data, use_native) + executor().call::(&mut t, &runtime_code, method, data, use_native, native_call) } pub fn new_test_ext(code: &[u8]) -> TestExternalities { @@ -165,15 +171,29 @@ pub fn construct_block( }; // execute the block to get the real header. - executor_call(env, "Core_initialize_block", &header.encode(), true).0.unwrap(); + executor_call:: _>( + env, + "Core_initialize_block", + &header.encode(), + true, + None, + ) + .0 + .unwrap(); for extrinsic in extrinsics.iter() { // Try to apply the `extrinsic`. It should be valid, in the sense that it passes // all pre-inclusion checks. 
- let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode(), true) - .0 - .expect("application of an extrinsic failed"); - + let r = executor_call:: _>( + env, + "BlockBuilder_apply_extrinsic", + &extrinsic.encode(), + true, + None, + ) + .0 + .expect("application of an extrinsic failed") + .into_encoded(); match ApplyExtrinsicResult::decode(&mut &r[..]) .expect("apply result deserialization failed") { @@ -182,10 +202,19 @@ pub fn construct_block( } } - let header = Header::decode( - &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0], true).0.unwrap()[..], + let header = match executor_call:: _>( + env, + "BlockBuilder_finalize_block", + &[0u8; 0], + true, + None, ) - .unwrap(); + .0 + .unwrap() + { + NativeOrEncoded::Native(_) => unreachable!(), + NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), + }; let hash = header.blake2_256(); (Block { header, extrinsics }.encode(), hash.into()) diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 6932cb2cea867..008ed5f53927b 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -17,17 +17,16 @@ use codec::{Encode, Joiner}; use frame_support::{ - dispatch::GetDispatchInfo, traits::Currency, - weights::{constants::ExtrinsicBaseWeight, IdentityFee, WeightToFee}, + weights::{constants::ExtrinsicBaseWeight, GetDispatchInfo, IdentityFee, WeightToFee}, }; use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, - Balances, CheckedExtrinsic, Multiplier, Runtime, RuntimeCall, TransactionByteFee, - TransactionPayment, + Balances, Call, CheckedExtrinsic, Multiplier, Runtime, TransactionByteFee, TransactionPayment, }; use node_primitives::Balance; use node_testing::keyring::*; +use sp_core::NeverNativeValue; use sp_runtime::{traits::One, Perbill}; pub mod common; @@ -55,12 +54,12 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: 
RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { - call: Box::new(RuntimeCall::System(frame_system::Call::fill_block { + function: Call::Sudo(pallet_sudo::Call::sudo { + call: Box::new(Call::System(frame_system::Call::fill_block { ratio: Perbill::from_percent(60), })), }), @@ -74,15 +73,15 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { let block2 = construct_block( &mut tt, 2, - block1.1, + block1.1.clone(), vec![ CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }), + function: Call::System(frame_system::Call::remark { remark: vec![0; 1] }), }, ], (time2 / SLOT_DURATION).into(), @@ -95,7 +94,15 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { ); // execute a big block. - executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + true, + None, + ) + .0 + .unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -106,7 +113,15 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }); // execute a big block. - executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block2.0, + true, + None, + ) + .0 + .unwrap(); // weight multiplier is increased for next block. 
t.execute_with(|| { @@ -148,15 +163,27 @@ fn transaction_fee_is_correct() { let tip = 1_000_000; let xt = sign(CheckedExtrinsic { signed: Some((alice(), signed_extra(0, tip))), - function: RuntimeCall::Balances(default_transfer_call()), + function: Call::Balances(default_transfer_call()), }); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; assert!(r.is_ok()); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone()), true).0; + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt.clone()), + true, + None, + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -181,7 +208,7 @@ fn transaction_fee_is_correct() { // we know that weight to fee multiplier is effect-less in block 1. // current weight of transfer = 200_000_000 // Linear weight to fee is 1:1 right now (1 weight = 1 unit of balance) - assert_eq!(weight_fee, weight.ref_time() as Balance); + assert_eq!(weight_fee, weight as Balance); balance_alice -= base_fee; balance_alice -= weight_fee; balance_alice -= tip; @@ -214,7 +241,7 @@ fn block_weight_capacity_report() { let mut xts = (0..num_transfers) .map(|i| CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer { + function: Call::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 0, }), @@ -225,7 +252,7 @@ fn block_weight_capacity_report() { 0, CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, ); @@ -247,7 +274,14 @@ fn block_weight_capacity_report() { len / 1024 / 1024, ); - let r = executor_call(&mut t, "Core_execute_block", &block.0, 
true).0; + let r = executor_call:: _>( + &mut t, + "Core_execute_block", + &block.0, + true, + None, + ) + .0; println!(" || Result = {:?}", r); assert!(r.is_ok()); @@ -288,13 +322,11 @@ fn block_length_capacity_report() { vec![ CheckedExtrinsic { signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { - now: time * 1000, - }), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce, 0))), - function: RuntimeCall::System(frame_system::Call::remark { + function: Call::System(frame_system::Call::remark { remark: vec![0u8; (block_number * factor) as usize], }), }, @@ -310,7 +342,14 @@ fn block_length_capacity_report() { len / 1024 / 1024, ); - let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; + let r = executor_call:: _>( + &mut t, + "Core_execute_block", + &block.0, + true, + None, + ) + .0; println!(" || Result = {:?}", r); assert!(r.is_ok()); diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index b7eccf9c36bd2..c41681d11be1e 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -2,7 +2,6 @@ name = "node-inspect" version = "0.9.0-dev" authors = ["Parity Technologies "] -description = "Substrate node block inspection tool." 
edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" @@ -12,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.6", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } thiserror = "1.0" sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } diff --git a/bin/node/inspect/src/cli.rs b/bin/node/inspect/src/cli.rs index fa6344ac346e9..cc1f232e1fe0f 100644 --- a/bin/node/inspect/src/cli.rs +++ b/bin/node/inspect/src/cli.rs @@ -46,7 +46,7 @@ pub enum InspectSubCmd { /// Can be either a block hash (no 0x prefix) or a number to retrieve existing block, /// or a 0x-prefixed bytes hex string, representing SCALE encoding of /// a block. - #[arg(value_name = "HASH or NUMBER or BYTES")] + #[clap(value_name = "HASH or NUMBER or BYTES")] input: String, }, /// Decode extrinsic with native version of runtime and print out the details. @@ -56,7 +56,7 @@ pub enum InspectSubCmd { /// Can be either a block hash (no 0x prefix) or number and the index, in the form /// of `{block}:{index}` or a 0x-prefixed bytes hex string, /// representing SCALE encoding of an extrinsic. - #[arg(value_name = "BLOCK:INDEX or BYTES")] + #[clap(value_name = "BLOCK:INDEX or BYTES")] input: String, }, } diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 528dce14f46a5..b37c5aa7ca2e8 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -140,11 +140,10 @@ impl> Inspector BlockAddress::Bytes(bytes) => TBlock::decode(&mut &*bytes)?, BlockAddress::Number(number) => { let id = BlockId::number(number); - let hash = self.chain.expect_block_hash_from_id(&id)?; let not_found = format!("Could not find block {:?}", id); let body = self .chain - .block_body(hash)? + .block_body(&id)? 
.ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; @@ -155,7 +154,7 @@ impl> Inspector let not_found = format!("Could not find block {:?}", id); let body = self .chain - .block_body(hash)? + .block_body(&id)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; @@ -297,7 +296,7 @@ mod tests { let b2 = ExtrinsicAddress::from_str("0 0"); let b3 = ExtrinsicAddress::from_str("0x0012345f"); - assert_eq!(e0, Ok(ExtrinsicAddress::Bytes(vec![0x12, 0x34]))); + assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); assert_eq!( b0, Ok(ExtrinsicAddress::Block( @@ -306,7 +305,7 @@ mod tests { )) ); assert_eq!(b1, Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0))); - assert_eq!(b2, Ok(ExtrinsicAddress::Bytes(vec![0, 0]))); + assert_eq!(b2, Ok(ExtrinsicAddress::Block(BlockAddress::Number(0), 0))); assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); } } diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 65a4223a7fb9f..bc6fa669ca4ea 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -2,7 +2,6 @@ name = "node-primitives" version = "2.0.0" authors = ["Parity Technologies "] -description = "Substrate node low-level primitives." edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index cc1861acf8ba2..2e70323555a78 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -2,7 +2,6 @@ name = "node-rpc" version = "3.0.0-dev" authors = ["Parity Technologies "] -description = "Substrate node rpc methods." 
edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" @@ -14,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } +pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" } pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } pallet-dex-rpc = { version = "4.0.0-dev", path = "../../../frame/dex/rpc/" } @@ -26,7 +26,6 @@ sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality sc-finality-grandpa-rpc = { version = "0.10.0-dev", path = "../../../client/finality-grandpa/rpc" } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } -sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../../../client/rpc-spec-v2" } sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 865c79acc5e4f..e293d009a0b2c 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -36,7 +36,7 @@ use std::sync::Arc; use jsonrpsee::RpcModule; use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; use sc_client_api::AuxStore; -use sc_consensus_babe::{BabeConfiguration, Epoch}; +use sc_consensus_babe::{Config, Epoch}; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, @@ -54,7 +54,7 @@ use sp_keystore::SyncCryptoStorePtr; /// Extra dependencies for BABE. 
pub struct BabeDeps { /// BABE protocol config. - pub babe_config: BabeConfiguration, + pub babe_config: Config, /// BABE pending epoch changes. pub shared_epoch_changes: SharedEpochChanges, /// The keystore that manages the keys of the node. @@ -108,11 +108,8 @@ where + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_mmr_rpc::MmrRuntimeApi< - Block, - ::Hash, - BlockNumber, - >, + C::Api: pallet_contracts_rpc::ContractsRuntimeApi, + C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: pallet_dex_rpc::DexRuntimeApi, C::Api: BabeApi, @@ -122,14 +119,13 @@ where B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - // use pallet_contracts_rpc::{Contracts, ContractsApiServer}; + use pallet_contracts_rpc::{Contracts, ContractsApiServer}; use pallet_dex_rpc::{Dex, DexApiServer}; use pallet_mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; use sc_finality_grandpa_rpc::{Grandpa, GrandpaApiServer}; use sc_rpc::dev::{Dev, DevApiServer}; - use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer}; use sc_sync_state_rpc::{SyncState, SyncStateApiServer}; use substrate_frame_rpc_system::{System, SystemApiServer}; use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; @@ -146,15 +142,11 @@ where finality_provider, } = grandpa; - let chain_name = chain_spec.name().to_string(); - let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); - let properties = chain_spec.properties(); - io.merge(ChainSpec::new(chain_name, genesis_hash, properties).into_rpc())?; - io.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; // Making synchronous calls in light client freezes the browser currently, // more context: 
https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. + io.merge(Contracts::new(client.clone()).into_rpc())?; io.merge(Mmr::new(client.clone()).into_rpc())?; io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge(Dex::new(client.clone()).into_rpc())?; diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index bf3e1c93685ed..29425af8def67 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -2,7 +2,6 @@ name = "kitchensink-runtime" version = "3.0.0-dev" authors = ["Parity Technologies "] -description = "Substrate node kitchensink runtime." edition = "2021" build = "build.rs" license = "Apache-2.0" @@ -21,6 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } static_assertions = "1.1.0" +hex-literal = { version = "0.3.4", optional = true } log = { version = "0.4.17", default-features = false } # primitives @@ -61,13 +61,13 @@ pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/child-bounties" } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } 
pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } pallet-election-provider-support-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-support/benchmarking", optional = true } pallet-elections-phragmen = { version = "5.0.0-dev", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-fast-unstake = { version = "4.0.0-dev", default-features = false, path = "../../../frame/fast-unstake" } pallet-gilt = { version = "4.0.0-dev", default-features = false, path = "../../../frame/gilt" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } @@ -118,13 +118,6 @@ substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-bu default = ["std"] with-tracing = ["frame-executive/with-tracing"] std = [ - "sp-sandbox/std", - "pallet-whitelist/std", - "pallet-offences-benchmarking?/std", - "pallet-election-provider-support-benchmarking?/std", - "pallet-asset-tx-payment/std", - "frame-system-benchmarking?/std", - "frame-election-provider-support/std", "sp-authority-discovery/std", "pallet-assets/std", "pallet-authority-discovery/std", @@ -140,10 +133,10 @@ std = [ "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", + "pallet-contracts-rpc-runtime-api/std", "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", - "pallet-fast-unstake/std", "frame-executive/std", "pallet-gilt/std", "pallet-grandpa/std", @@ -156,7 +149,6 @@ std = [ "pallet-multisig/std", "pallet-nomination-pools/std", "pallet-nomination-pools-runtime-api/std", - "pallet-nomination-pools-benchmarking?/std", 
"pallet-identity/std", "pallet-scheduler/std", "node-primitives/std", @@ -168,7 +160,6 @@ std = [ "pallet-randomness-collective-flip/std", "sp-std/std", "pallet-session/std", - "pallet-session-benchmarking?/std", "sp-api/std", "sp-runtime/std", "sp-staking/std", @@ -177,7 +168,7 @@ std = [ "sp-session/std", "pallet-sudo/std", "frame-support/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-election-provider-multi-phase/std", @@ -198,7 +189,7 @@ std = [ "pallet-uniques/std", "pallet-vesting/std", "log/std", - "frame-try-runtime?/std", + "frame-try-runtime/std", "sp-io/std", "pallet-child-bounties/std", "pallet-alliance/std", @@ -224,7 +215,6 @@ runtime-benchmarks = [ "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", - "pallet-fast-unstake/runtime-benchmarks", "pallet-gilt/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", @@ -234,8 +224,8 @@ runtime-benchmarks = [ "pallet-membership/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", - "pallet-nomination-pools-benchmarking/runtime-benchmarks", - "pallet-offences-benchmarking/runtime-benchmarks", + "pallet-nomination-pools-benchmarking", + "pallet-offences-benchmarking", "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", @@ -243,7 +233,7 @@ runtime-benchmarks = [ "pallet-referenda/runtime-benchmarks", "pallet-recovery/runtime-benchmarks", "pallet-remark/runtime-benchmarks", - "pallet-session-benchmarking/runtime-benchmarks", + "pallet-session-benchmarking", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", @@ -255,19 +245,18 @@ runtime-benchmarks = [ "pallet-uniques/runtime-benchmarks", 
"pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", + "frame-system-benchmarking", + "hex-literal", ] try-runtime = [ - "frame-try-runtime/try-runtime", "frame-executive/try-runtime", + "frame-try-runtime", "frame-system/try-runtime", - "frame-support/try-runtime", "pallet-alliance/try-runtime", "pallet-assets/try-runtime", "pallet-authority-discovery/try-runtime", "pallet-authorship/try-runtime", "pallet-babe/try-runtime", - "pallet-bags-list/try-runtime", "pallet-balances/try-runtime", "pallet-bounties/try-runtime", "pallet-child-bounties/try-runtime", @@ -277,39 +266,34 @@ try-runtime = [ "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", - "pallet-fast-unstake/try-runtime", "pallet-gilt/try-runtime", "pallet-grandpa/try-runtime", + "pallet-identity/try-runtime", "pallet-im-online/try-runtime", "pallet-indices/try-runtime", - "pallet-identity/try-runtime", "pallet-lottery/try-runtime", "pallet-membership/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", - "pallet-nomination-pools/try-runtime", "pallet-offences/try-runtime", "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", - "pallet-randomness-collective-flip/try-runtime", "pallet-ranked-collective/try-runtime", + "pallet-randomness-collective-flip/try-runtime", "pallet-recovery/try-runtime", "pallet-referenda/try-runtime", - "pallet-remark/try-runtime", + "pallet-scheduler/try-runtime", "pallet-session/try-runtime", + "pallet-society/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", - "pallet-scheduler/try-runtime", - "pallet-society/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-tips/try-runtime", - "pallet-treasury/try-runtime", - "pallet-utility/try-runtime", "pallet-transaction-payment/try-runtime", - "pallet-asset-tx-payment/try-runtime", - 
"pallet-transaction-storage/try-runtime", + "pallet-treasury/try-runtime", "pallet-uniques/try-runtime", + "pallet-utility/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", "pallet-dex/try-runtime", diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 0a5c797ba729f..68c780094208f 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -18,8 +18,7 @@ //! Some configurable implementations as associated type for the substrate runtime. use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, - RuntimeCall, + AccountId, AllianceMotion, Assets, Authorship, Balances, Call, Hash, NegativeImbalance, Runtime, }; use frame_support::{ pallet_prelude::*, @@ -78,11 +77,11 @@ impl IdentityVerifier for AllianceIdentityVerifier { } pub struct AllianceProposalProvider; -impl ProposalProvider for AllianceProposalProvider { +impl ProposalProvider for AllianceProposalProvider { fn propose_proposal( who: AccountId, threshold: u32, - proposal: Box, + proposal: Box, length_bound: u32, ) -> Result<(u32, u32), DispatchError> { AllianceMotion::do_propose_proposed(who, threshold, proposal, length_bound) @@ -110,7 +109,7 @@ impl ProposalProvider for AllianceProposalProvider AllianceMotion::do_close(proposal_hash, proposal_index, proposal_weight_bound, length_bound) } - fn proposal_of(proposal_hash: Hash) -> Option { + fn proposal_of(proposal_hash: Hash) -> Option { AllianceMotion::proposal_of(proposal_hash) } } @@ -126,13 +125,10 @@ mod multiplier_tests { use crate::{ constants::{currency::*, time::*}, - AdjustmentVariable, MaximumMultiplier, MinimumMultiplier, Runtime, - RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment, - }; - use frame_support::{ - dispatch::DispatchClass, - weights::{Weight, WeightToFee}, + AdjustmentVariable, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights, + System, TargetBlockFullness, 
TransactionPayment, }; + use frame_support::weights::{DispatchClass, Weight, WeightToFee}; fn max_normal() -> Weight { BlockWeights::get() @@ -156,7 +152,6 @@ mod multiplier_tests { TargetBlockFullness, AdjustmentVariable, MinimumMultiplier, - MaximumMultiplier, >::convert(fm) } @@ -168,13 +163,13 @@ mod multiplier_tests { let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); // maximum tx weight - let m = max_normal().ref_time() as f64; + let m = max_normal() as f64; // block weight always truncated to max weight - let block_weight = (block_weight.ref_time() as f64).min(m); + let block_weight = (block_weight as f64).min(m); let v: f64 = AdjustmentVariable::get().to_float(); // Ideal saturation in terms of weight - let ss = target().ref_time() as f64; + let ss = target() as f64; // Current saturation in terms of weight let s = block_weight; @@ -202,12 +197,12 @@ mod multiplier_tests { fn truth_value_update_poc_works() { let fm = Multiplier::saturating_from_rational(1, 2); let test_set = vec![ - (Weight::zero(), fm), - (Weight::from_ref_time(100), fm), - (Weight::from_ref_time(1000), fm), - (target(), fm), - (max_normal() / 2, fm), - (max_normal(), fm), + (0, fm.clone()), + (100, fm.clone()), + (1000, fm.clone()), + (target(), fm.clone()), + (max_normal() / 2, fm.clone()), + (max_normal(), fm.clone()), ]; test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { @@ -225,7 +220,7 @@ mod multiplier_tests { fn multiplier_can_grow_from_zero() { // if the min is too small, then this will not change, and we are doomed forever. // the weight is 1/100th bigger than target. 
- run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || { + run_with_system_weight(target() * 101 / 100, || { let next = runtime_multiplier_update(min_multiplier()); assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier()); }) @@ -234,7 +229,7 @@ mod multiplier_tests { #[test] fn multiplier_cannot_go_below_limit() { // will not go any further below even if block is empty. - run_with_system_weight(Weight::zero(), || { + run_with_system_weight(0, || { let next = runtime_multiplier_update(min_multiplier()); assert_eq!(next, min_multiplier()); }) @@ -252,7 +247,7 @@ mod multiplier_tests { // 1 < 0.00001 * k * 0.1875 // 10^9 / 1875 < k // k > 533_333 ~ 18,5 days. - run_with_system_weight(Weight::zero(), || { + run_with_system_weight(0, || { // start from 1, the default. let mut fm = Multiplier::one(); let mut iterations: u64 = 0; @@ -288,8 +283,7 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - - Weight::from_ref_time(100); + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - 100; // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); @@ -413,27 +407,27 @@ mod multiplier_tests { #[test] fn weight_to_fee_should_not_overflow_on_large_weights() { - let kb = Weight::from_ref_time(1024); - let mb = 1024u64 * kb; + let kb = 1024 as Weight; + let mb = kb * kb; let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. 
vec![ - Weight::zero(), - Weight::from_ref_time(1), - Weight::from_ref_time(10), - Weight::from_ref_time(1000), + 0, + 1, + 10, + 1000, kb, - 10u64 * kb, - 100u64 * kb, + 10 * kb, + 100 * kb, mb, - 10u64 * mb, - Weight::from_ref_time(2147483647), - Weight::from_ref_time(4294967295), + 10 * mb, + 2147483647, + 4294967295, BlockWeights::get().max_block / 2, BlockWeights::get().max_block, - Weight::MAX / 2, - Weight::MAX, + Weight::max_value() / 2, + Weight::max_value(), ] .into_iter() .for_each(|i| { @@ -446,7 +440,7 @@ mod multiplier_tests { // Some values that are all above the target and will cause an increase. let t = target(); - vec![t + Weight::from_ref_time(100), t * 2, t * 4].into_iter().for_each(|i| { + vec![t + 100, t * 2, t * 4].into_iter().for_each(|i| { run_with_system_weight(i, || { let fm = runtime_multiplier_update(max_fm); // won't grow. The convert saturates everything. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 642974aabe53a..e0b7b7d973639 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -28,17 +28,16 @@ use frame_election_provider_support::{ }; use frame_support::{ construct_runtime, - dispatch::DispatchClass, pallet_prelude::Get, parameter_types, traits::{ AsEnsureOriginWithArg, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, + LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - ConstantMultiplier, IdentityFee, Weight, + ConstantMultiplier, DispatchClass, IdentityFee, Weight, }, PalletId, RuntimeDebug, }; @@ -65,7 +64,7 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - self, BlakeTwo256, Block as BlockT, Bounded, ConvertInto, NumberFor, OpaqueKeys, + self, BlakeTwo256, Block 
as BlockT, ConvertInto, NumberFor, OpaqueKeys, SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, @@ -170,8 +169,8 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 2 seconds of compute with a 6 second average block time, with maximum proof size. -const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2).set_proof_size(u64::MAX); +/// We allow for 2 seconds of compute with a 6 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; parameter_types! { pub const BlockHashCount: BlockNumber = 2400; @@ -205,8 +204,8 @@ impl frame_system::Config for Runtime { type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = Index; type BlockNumber = BlockNumber; type Hash = Hash; @@ -214,7 +213,7 @@ impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = Indices; type Header = generic::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = BlockHashCount; type Version = Version; type PalletInfo = PalletInfo; @@ -230,8 +229,8 @@ impl frame_system::Config for Runtime { impl pallet_randomness_collective_flip::Config for Runtime {} impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type PalletsOrigin = OriginCaller; type WeightInfo = pallet_utility::weights::SubstrateWeight; } @@ -244,12 +243,12 @@ parameter_types! 
{ } impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; - type MaxSignatories = ConstU32<100>; + type MaxSignatories = ConstU16<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; } @@ -287,28 +286,25 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { +impl InstanceFilter for ProxyType { + fn filter(&self, c: &Call) -> bool { match self { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - RuntimeCall::Balances(..) | - RuntimeCall::Assets(..) | - RuntimeCall::Uniques(..) | - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) | - RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) + Call::Balances(..) | + Call::Assets(..) | Call::Uniques(..) | + Call::Vesting(pallet_vesting::Call::vested_transfer { .. }) | + Call::Indices(pallet_indices::Call::transfer { .. }) ), ProxyType::Governance => matches!( c, - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | - RuntimeCall::Society(..) | - RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::Elections(..) | - RuntimeCall::Treasury(..) + Call::Democracy(..) | + Call::Council(..) | Call::Society(..) | + Call::TechnicalCommittee(..) | + Call::Elections(..) | Call::Treasury(..) 
), - ProxyType::Staking => matches!(c, RuntimeCall::Staking(..)), + ProxyType::Staking => matches!(c, Call::Staking(..)), } } fn is_superset(&self, o: &Self) -> bool { @@ -323,8 +319,8 @@ impl InstanceFilter for ProxyType { } impl pallet_proxy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type Currency = Balances; type ProxyType = ProxyType; type ProxyDepositBase = ProxyDepositBase; @@ -340,19 +336,22 @@ impl pallet_proxy::Config for Runtime { parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; + // Retry a scheduled item every 10 blocks (1 minute) until the preimage exists. + pub const NoPreimagePostponement: Option = Some(10); } impl pallet_scheduler::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; + type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = ConstU32<512>; + type MaxScheduledPerBlock = ConstU32<50>; type WeightInfo = pallet_scheduler::weights::SubstrateWeight; type OriginPrivilegeCmp = EqualPrivilegeOnly; - type Preimages = Preimage; + type PreimageProvider = Preimage; + type NoPreimagePostponement = NoPreimagePostponement; } parameter_types! { @@ -364,9 +363,10 @@ parameter_types! 
{ impl pallet_preimage::Config for Runtime { type WeightInfo = pallet_preimage::weights::SubstrateWeight; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type ManagerOrigin = EnsureRoot; + type MaxSize = PreimageMaxSize; type BaseDeposit = PreimageBaseDeposit; type ByteDeposit = PreimageByteDeposit; } @@ -413,7 +413,7 @@ impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = pallet_indices::weights::SubstrateWeight; } @@ -431,7 +431,7 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type Balance = Balance; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; @@ -443,26 +443,20 @@ parameter_types! { pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(1, 100_000); pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); - pub MaximumMultiplier: Multiplier = Bounded::max_value(); } impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = TargetedFeeAdjustment< - Self, - TargetBlockFullness, - AdjustmentVariable, - MinimumMultiplier, - MaximumMultiplier, - >; + type FeeMultiplierUpdate = + TargetedFeeAdjustment; } impl pallet_asset_tx_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Fungibles = Assets; type OnChargeAssetTransaction = 
pallet_asset_tx_payment::FungiblesAdapter< pallet_assets::BalanceToAssetBalance, @@ -502,7 +496,7 @@ impl_opaque_keys! { } impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; @@ -537,7 +531,6 @@ parameter_types! { pub const MaxNominatorRewardedPerValidator: u32 = 256; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub OffchainRepeat: BlockNumber = 5; - pub HistoryDepth: u32 = 84; } pub struct StakingBenchmarkingConfig; @@ -553,7 +546,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; type RewardRemainder = Treasury; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Slash = Treasury; // send the slashed funds to the treasury. type Reward = (); // rewards are minted from the void type SessionsPerEra = SessionsPerEra; @@ -570,27 +563,14 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; - type GenesisElectionProvider = onchain::OnChainExecution; - type VoterList = VoterList; - // This a placeholder, to be introduced in the next PR as an instance of bags-list - type TargetList = pallet_staking::UseValidatorsMap; + type GenesisElectionProvider = onchain::UnboundedExecution; + type VoterList = BagsList; type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = HistoryDepth; type OnStakerSlash = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; } -impl pallet_fast_unstake::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ControlOrigin = frame_system::EnsureRoot; - type BatchSize = ConstU32<128>; - type Deposit = ConstU128<{ DOLLARS 
}>; - type Currency = Balances; - type Staking = Staking; - type WeightInfo = (); -} - parameter_types! { // phase durations. 1/4 of the last session for each. pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; @@ -628,14 +608,7 @@ frame_election_provider_support::generate_solution_type!( parameter_types! { pub MaxNominations: u32 = ::LIMIT as u32; - pub MaxElectingVoters: u32 = 40_000; - pub MaxElectableTargets: u16 = 10_000; - // OnChain values are lower. - pub MaxOnChainElectingVoters: u32 = 5000; - pub MaxOnChainElectableTargets: u16 = 1250; - // The maximum winners that can be elected by the Election pallet which is equivalent to the - // maximum active validators the staking pallet can have. - pub MaxActiveValidators: u32 = 1000; + pub MaxElectingVoters: u32 = 10_000; } /// The numbers configured here could always be more than the the maximum limits of staking pallet @@ -686,9 +659,11 @@ impl onchain::Config for OnChainSeqPhragmen { >; type DataProvider = ::DataProvider; type WeightInfo = frame_election_provider_support::weights::SubstrateWeight; - type MaxWinners = ::MaxWinners; - type VotersBound = MaxOnChainElectingVoters; - type TargetsBound = MaxOnChainElectableTargets; +} + +impl onchain::BoundedConfig for OnChainSeqPhragmen { + type VotersBound = MaxElectingVoters; + type TargetsBound = ConstU32<2_000>; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -711,7 +686,7 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { } impl pallet_election_provider_multi_phase::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type EstimateCallFee = TransactionPayment; type SignedPhase = SignedPhase; @@ -731,12 +706,11 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SlashHandler = (); // burn slashes type RewardHandler = (); // nothing to do upon rewards type DataProvider = Staking; - type Fallback = onchain::OnChainExecution; - type 
GovernanceFallback = onchain::OnChainExecution; + type Fallback = onchain::BoundedExecution; + type GovernanceFallback = onchain::BoundedExecution; type Solver = SequentialPhragmen, OffchainRandomBalancing>; type ForceOrigin = EnsureRootOrHalfCouncil; - type MaxElectableTargets = MaxElectableTargets; - type MaxWinners = MaxActiveValidators; + type MaxElectableTargets = ConstU16<{ u16::MAX }>; type MaxElectingVoters = MaxElectingVoters; type BenchmarkingConfig = ElectionProviderBenchmarkConfig; type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; @@ -746,15 +720,12 @@ parameter_types! { pub const BagThresholds: &'static [u64] = &voter_bags::THRESHOLDS; } -type VoterBagsListInstance = pallet_bags_list::Instance1; -impl pallet_bags_list::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - /// The voter bags-list is loosely kept up to date, and the real source of truth for the score - /// of each node is the staking pallet. +impl pallet_bags_list::Config for Runtime { + type Event = Event; type ScoreProvider = Staking; + type WeightInfo = pallet_bags_list::weights::SubstrateWeight; type BagThresholds = BagThresholds; type Score = VoteWeight; - type WeightInfo = pallet_bags_list::weights::SubstrateWeight; } parameter_types! { @@ -779,12 +750,13 @@ impl Convert for U256ToBalance { impl pallet_nomination_pools::Config for Runtime { type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; + type CurrencyBalance = Balance; type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakingInterface = pallet_staking::Pallet; type PostUnbondingPoolsWindow = PostUnbondPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; @@ -798,7 +770,7 @@ parameter_types! 
{ impl pallet_conviction_voting::Config for Runtime { type WeightInfo = pallet_conviction_voting::weights::SubstrateWeight; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type VoteLockingPeriod = VoteLockingPeriod; type MaxVotes = ConstU32<512>; @@ -815,7 +787,7 @@ parameter_types! { pub struct TracksInfo; impl pallet_referenda::TracksInfo for TracksInfo { type Id = u16; - type RuntimeOrigin = ::PalletsOrigin; + type Origin = ::PalletsOrigin; fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { static DATA: [(u16, pallet_referenda::TrackInfo); 1] = [( 0u16, @@ -841,7 +813,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { )]; &DATA[..] } - fn track_for(id: &Self::RuntimeOrigin) -> Result { + fn track_for(id: &Self::Origin) -> Result { if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { match system_origin { frame_system::RawOrigin::Root => Ok(0), @@ -852,12 +824,11 @@ impl pallet_referenda::TracksInfo for TracksInfo { } } } -pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); impl pallet_referenda::Config for Runtime { type WeightInfo = pallet_referenda::weights::SubstrateWeight; - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Call = Call; + type Event = Event; type Scheduler = Scheduler; type Currency = pallet_balances::Pallet; type SubmitOrigin = EnsureSigned; @@ -871,13 +842,12 @@ impl pallet_referenda::Config for Runtime { type UndecidingTimeout = UndecidingTimeout; type AlarmInterval = AlarmInterval; type Tracks = TracksInfo; - type Preimages = Preimage; } impl pallet_referenda::Config for Runtime { type WeightInfo = pallet_referenda::weights::SubstrateWeight; - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Call = Call; + type Event = Event; type Scheduler = Scheduler; type Currency = pallet_balances::Pallet; type SubmitOrigin = EnsureSigned; @@ -891,12 +861,11 @@ impl pallet_referenda::Config for 
Runtime { type UndecidingTimeout = UndecidingTimeout; type AlarmInterval = AlarmInterval; type Tracks = TracksInfo; - type Preimages = Preimage; } impl pallet_ranked_collective::Config for Runtime { type WeightInfo = pallet_ranked_collective::weights::SubstrateWeight; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PromoteOrigin = EnsureRootWithSuccess>; type DemoteOrigin = EnsureRootWithSuccess>; type Polls = RankedPolls; @@ -906,7 +875,7 @@ impl pallet_ranked_collective::Config for Runtime { impl pallet_remark::Config for Runtime { type WeightInfo = pallet_remark::weights::SubstrateWeight; - type RuntimeEvent = RuntimeEvent; + type Event = Event; } parameter_types! { @@ -920,7 +889,8 @@ parameter_types! { } impl pallet_democracy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Proposal = Call; + type Event = Event; type Currency = Balances; type EnactmentPeriod = EnactmentPeriod; type LaunchPeriod = LaunchPeriod; @@ -959,15 +929,14 @@ impl pallet_democracy::Config for Runtime { // only do it once and it lasts only for the cool-off period. type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; + type PreimageByteDeposit = PreimageByteDeposit; + type OperationalPreimageOrigin = pallet_collective::EnsureMember; type Slash = Treasury; type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = ConstU32<100>; type WeightInfo = pallet_democracy::weights::SubstrateWeight; type MaxProposals = MaxProposals; - type Preimages = Preimage; - type MaxDeposits = ConstU32<100>; - type MaxBlacklisted = ConstU32<100>; } parameter_types! { @@ -978,9 +947,9 @@ parameter_types! 
{ type CouncilCollective = pallet_collective::Instance1; impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = CouncilMotionDuration; type MaxProposals = CouncilMaxProposals; type MaxMembers = CouncilMaxMembers; @@ -997,8 +966,6 @@ parameter_types! { pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; - pub const MaxVoters: u32 = 10 * 1000; - pub const MaxCandidates: u32 = 1000; pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } @@ -1006,7 +973,7 @@ parameter_types! { const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); impl pallet_elections_phragmen::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletId = ElectionsPhragmenPalletId; type Currency = Balances; type ChangeMembers = Council; @@ -1022,8 +989,6 @@ impl pallet_elections_phragmen::Config for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; - type MaxVoters = MaxVoters; - type MaxCandidates = MaxCandidates; type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight; } @@ -1035,9 +1000,9 @@ parameter_types! 
{ type TechnicalCollective = pallet_collective::Instance2; impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = TechnicalMotionDuration; type MaxProposals = TechnicalMaxProposals; type MaxMembers = TechnicalMaxMembers; @@ -1050,7 +1015,7 @@ type EnsureRootOrHalfCouncil = EitherOfDiverse< pallet_collective::EnsureProportionMoreThan, >; impl pallet_membership::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type AddOrigin = EnsureRootOrHalfCouncil; type RemoveOrigin = EnsureRootOrHalfCouncil; type SwapOrigin = EnsureRootOrHalfCouncil; @@ -1087,7 +1052,7 @@ impl pallet_treasury::Config for Runtime { EnsureRoot, pallet_collective::EnsureProportionMoreThan, >; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; @@ -1113,7 +1078,7 @@ parameter_types! { } impl pallet_bounties::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BountyDepositBase = BountyDepositBase; type BountyDepositPayoutDelay = BountyDepositPayoutDelay; type BountyUpdatePeriod = BountyUpdatePeriod; @@ -1132,14 +1097,14 @@ parameter_types! 
{ } impl pallet_child_bounties::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type MaxActiveChildBountyCount = ConstU32<5>; type ChildBountyValueMinimum = ChildBountyValueMinimum; type WeightInfo = pallet_child_bounties::weights::SubstrateWeight; } impl pallet_tips::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = MaximumReasonLength; type Tippers = Elections; @@ -1167,8 +1132,8 @@ impl pallet_contracts::Config for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; /// The safest default is to allow no calls at all. /// /// Runtimes should whitelist dispatchables that are allowed to be called from contracts @@ -1186,13 +1151,15 @@ impl pallet_contracts::Config for Runtime { type DeletionWeightLimit = DeletionWeightLimit; type Schedule = Schedule; type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; + type RelaxedMaxCodeLen = ConstU32<{ 256 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } impl pallet_sudo::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; } parameter_types! { @@ -1207,14 +1174,14 @@ parameter_types! { impl frame_system::offchain::CreateSignedTransaction for Runtime where - RuntimeCall: From, + Call: From, { fn create_transaction>( - call: RuntimeCall, + call: Call, public: ::Signer, account: AccountId, nonce: Index, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option<(Call, ::SignaturePayload)> { let tip = 0; // take the biggest period possible. 
let period = @@ -1254,15 +1221,15 @@ impl frame_system::offchain::SigningTypes for Runtime { impl frame_system::offchain::SendTransactionTypes for Runtime where - RuntimeCall: From, + Call: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type OverarchingCall = Call; } impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type NextSessionRotation = Babe; type ValidatorSet = Historical; type ReportUnresponsiveness = Offences; @@ -1274,7 +1241,7 @@ impl pallet_im_online::Config for Runtime { } impl pallet_offences::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; } @@ -1284,7 +1251,8 @@ impl pallet_authority_discovery::Config for Runtime { } impl pallet_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; + type Call = Call; type KeyOwnerProofSystem = Historical; @@ -1316,7 +1284,7 @@ parameter_types! { } impl pallet_identity::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; type FieldDeposit = FieldDeposit; @@ -1338,9 +1306,9 @@ parameter_types! { } impl pallet_recovery::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = pallet_recovery::weights::SubstrateWeight; - type RuntimeCall = RuntimeCall; + type Call = Call; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; @@ -1361,7 +1329,7 @@ parameter_types! 
{ } impl pallet_society::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletId = SocietyPalletId; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; @@ -1381,17 +1349,14 @@ impl pallet_society::Config for Runtime { parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); } impl pallet_vesting::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = pallet_vesting::weights::SubstrateWeight; - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; // `VestingInfo` encode length is 36bytes. 28 schedules gets encoded as 1009 bytes, which is the // highest number of schedules that encodes less than 2^10. const MAX_VESTING_SCHEDULES: u32 = 28; @@ -1414,10 +1379,10 @@ parameter_types! { impl pallet_lottery::Config for Runtime { type PalletId = LotteryPalletId; - type RuntimeCall = RuntimeCall; + type Call = Call; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ManagerOrigin = EnsureRoot; type MaxCalls = MaxCalls; type ValidateCall = Lottery; @@ -1434,11 +1399,10 @@ parameter_types! { } impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Balance = u128; type AssetId = u32; type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; type AssetDeposit = AssetDeposit; type AssetAccountDeposit = ConstU128; @@ -1456,11 +1420,10 @@ parameter_types! 
{ } impl pallet_dex::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type AssetBalance = ::Balance; type Assets = Assets; - type PoolAssets = Assets; type AssetId = ::AssetId; type PalletId = DexPalletId; } @@ -1477,7 +1440,7 @@ parameter_types! { } impl pallet_gilt::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type CurrencyBalance = Balance; type AdminOrigin = frame_system::EnsureRoot; @@ -1502,7 +1465,7 @@ parameter_types! { } impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type CollectionId = u32; type ItemId = u32; type Currency = Balances; @@ -1526,9 +1489,9 @@ impl pallet_uniques::Config for Runtime { } impl pallet_transaction_storage::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; - type RuntimeCall = RuntimeCall; + type Call = Call; type FeeDestination = (); type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; type MaxBlockTransactions = @@ -1538,8 +1501,8 @@ impl pallet_transaction_storage::Config for Runtime { } impl pallet_whitelist::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type WhitelistOrigin = EnsureRoot; type DispatchWhitelistedOrigin = EnsureRoot; type PreimageProvider = Preimage; @@ -1553,7 +1516,7 @@ parameter_types! { } impl pallet_state_trie_migration::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ControlOrigin = EnsureRoot; type Currency = Balances; type MaxKeyLen = MigrationMaxKeyLen; @@ -1567,19 +1530,17 @@ impl pallet_state_trie_migration::Config for Runtime { type WeightInfo = (); } -const ALLIANCE_MOTION_DURATION_IN_BLOCKS: BlockNumber = 5 * DAYS; - parameter_types! 
{ - pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION_IN_BLOCKS; + pub const AllianceMotionDuration: BlockNumber = 5 * DAYS; pub const AllianceMaxProposals: u32 = 100; pub const AllianceMaxMembers: u32 = 100; } type AllianceCollective = pallet_collective::Instance3; impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = AllianceMotionDuration; type MaxProposals = AllianceMaxProposals; type MaxMembers = AllianceMaxMembers; @@ -1592,12 +1553,11 @@ parameter_types! { pub const MaxFellows: u32 = AllianceMaxMembers::get() - MaxFounders::get(); pub const MaxAllies: u32 = 100; pub const AllyDeposit: Balance = 10 * DOLLARS; - pub const RetirementPeriod: BlockNumber = ALLIANCE_MOTION_DURATION_IN_BLOCKS + (1 * DAYS); } impl pallet_alliance::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Proposal = RuntimeCall; + type Event = Event; + type Proposal = Call; type AdminOrigin = EitherOfDiverse< EnsureRoot, pallet_collective::EnsureProportionMoreThan, @@ -1629,7 +1589,6 @@ impl pallet_alliance::Config for Runtime { type MaxMembersCount = AllianceMaxMembers; type AllyDeposit = AllyDeposit; type WeightInfo = pallet_alliance::weights::SubstrateWeight; - type RetirementPeriod = RetirementPeriod; } construct_runtime!( @@ -1682,7 +1641,7 @@ construct_runtime!( Gilt: pallet_gilt, Uniques: pallet_uniques, TransactionStorage: pallet_transaction_storage, - VoterList: pallet_bags_list::, + BagsList: pallet_bags_list, StateTrieMigration: pallet_state_trie_migration, ChildBounties: pallet_child_bounties, Referenda: pallet_referenda, @@ -1694,7 +1653,6 @@ construct_runtime!( NominationPools: pallet_nomination_pools, RankedPolls: pallet_referenda::, RankedCollective: pallet_ranked_collective, - FastUnstake: pallet_fast_unstake, Dex: pallet_dex, } ); @@ -1724,14 +1682,12 
@@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_asset_tx_payment::ChargeAssetTxPayment, ); - /// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1739,16 +1695,8 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - Migrations, ->; - -// All migrations executed on runtime upgrade as a nested tuple of types implementing -// `OnRuntimeUpgrade`. -type Migrations = ( pallet_nomination_pools::migration::v2::MigrateToV2, - pallet_alliance::migration::Migration, - pallet_contracts::Migration, -); +>; /// MMR helper types. mod mmr { @@ -1768,10 +1716,9 @@ extern crate frame_benchmarking; mod benches { define_benchmarks!( [frame_benchmarking, BaselineBench::] - [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] - [pallet_bags_list, VoterList] + [pallet_bags_list, BagsList] [pallet_balances, Balances] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] @@ -1782,7 +1729,6 @@ mod benches { [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] - [pallet_fast_unstake, FastUnstake] [pallet_gilt, Gilt] [pallet_grandpa, Grandpa] [pallet_identity, Identity] @@ -1909,20 +1855,24 @@ impl_runtime_apis! 
{ impl pallet_nomination_pools_runtime_api::NominationPoolsApi for Runtime { fn pending_rewards(member_account: AccountId) -> Balance { - NominationPools::pending_rewards(member_account).unwrap_or_default() + NominationPools::pending_rewards(member_account) } } impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeConfiguration { - let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); - sp_consensus_babe::BabeConfiguration { + fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { + // The choice of `c` parameter (where `1 - c` represents the + // probability of a slot being empty), is done in accordance to the + // slot duration and expected target block time, for safely + // resisting network delays of maximum two seconds. + // + sp_consensus_babe::BabeGenesisConfiguration { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), - c: epoch_config.c, - authorities: Babe::authorities().to_vec(), + c: BABE_GENESIS_EPOCH_CONFIG.c, + genesis_authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), - allowed_slots: epoch_config.allowed_slots, + allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, } } @@ -1974,75 +1924,49 @@ impl_runtime_apis! 
{ } } - impl pallet_contracts::ContractsApi for Runtime + impl pallet_contracts_rpc_runtime_api::ContractsApi< + Block, AccountId, Balance, BlockNumber, Hash, + > + for Runtime { fn call( origin: AccountId, dest: AccountId, value: Balance, - gas_limit: Option, + gas_limit: u64, storage_deposit_limit: Option, input_data: Vec, ) -> pallet_contracts_primitives::ContractExecResult { - let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); - Contracts::bare_call( - origin, - dest, - value, - gas_limit, - storage_deposit_limit, - input_data, - true, - pallet_contracts::Determinism::Deterministic, - ) + Contracts::bare_call(origin, dest, value, gas_limit, storage_deposit_limit, input_data, true) } fn instantiate( origin: AccountId, value: Balance, - gas_limit: Option, + gas_limit: u64, storage_deposit_limit: Option, code: pallet_contracts_primitives::Code, data: Vec, salt: Vec, ) -> pallet_contracts_primitives::ContractInstantiateResult { - let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); - Contracts::bare_instantiate( - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data, - salt, - true - ) + Contracts::bare_instantiate(origin, value, gas_limit, storage_deposit_limit, code, data, salt, true) } fn upload_code( origin: AccountId, code: Vec, storage_deposit_limit: Option, - determinism: pallet_contracts::Determinism, ) -> pallet_contracts_primitives::CodeUploadResult { - Contracts::bare_upload_code( - origin, - code, - storage_deposit_limit, - determinism, - ) + Contracts::bare_upload_code(origin, code, storage_deposit_limit) } fn get_storage( address: AccountId, key: Vec, ) -> pallet_contracts_primitives::GetStorageResult { - Contracts::get_storage( - address, - key - ) + Contracts::get_storage(address, key) } } @@ -2068,44 +1992,52 @@ impl_runtime_apis! 
{ } } - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info(call: RuntimeCall, len: u32) -> RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details(call: RuntimeCall, len: u32) -> FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - } - impl pallet_mmr::primitives::MmrApi< Block, mmr::Hash, - BlockNumber, > for Runtime { + fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) + -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> + { + Mmr::generate_batch_proof(vec![leaf_index]).and_then(|(leaves, proof)| + Ok(( + mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), + mmr::BatchProof::into_single_leaf_proof(proof)? + )) + ) + } + + fn verify_proof(leaf: mmr::EncodableOpaqueLeaf, proof: mmr::Proof) + -> Result<(), mmr::Error> + { + let leaf: mmr::Leaf = leaf + .into_opaque_leaf() + .try_decode() + .ok_or(mmr::Error::Verify)?; + Mmr::verify_leaves(vec![leaf], mmr::Proof::into_batch_proof(proof)) + } + + fn verify_proof_stateless( + root: mmr::Hash, + leaf: mmr::EncodableOpaqueLeaf, + proof: mmr::Proof + ) -> Result<(), mmr::Error> { + let node = mmr::DataOrHash::Data(leaf.into_opaque_leaf()); + pallet_mmr::verify_leaves_proof::(root, vec![node], mmr::Proof::into_batch_proof(proof)) + } + fn mmr_root() -> Result { Ok(Mmr::mmr_root()) } - fn generate_proof( - block_numbers: Vec, - best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { - Mmr::generate_proof(block_numbers, best_known_block_number).map( - |(leaves, proof)| { - ( - leaves - .into_iter() - .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) - .collect(), - proof, - ) - }, - ) + fn generate_batch_proof(leaf_indices: Vec) + -> Result<(Vec, mmr::BatchProof), mmr::Error> + { + Mmr::generate_batch_proof(leaf_indices) + .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) } - 
fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -2115,10 +2047,10 @@ impl_runtime_apis! { Mmr::verify_leaves(leaves, proof) } - fn verify_proof_stateless( + fn verify_batch_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::BatchProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) @@ -2147,21 +2079,8 @@ impl_runtime_apis! { (weight, RuntimeBlockWeights::get().max_block) } - fn execute_block( - block: Block, - state_root_check: bool, - select: frame_try_runtime::TryStateSelect - ) -> Weight { - log::info!( - target: "node-runtime", - "try-runtime: executing block {:?} / root checks: {:?} / try-state-select: {:?}", - block.header.hash(), - state_root_check, - select, - ); - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, select).unwrap() + fn execute_block_no_check(block: Block) -> Weight { + Executive::execute_block_no_check(block) } } @@ -2214,14 +2133,22 @@ impl_runtime_apis! 
{ impl baseline::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} - use frame_support::traits::WhitelistedStorageKeys; - let mut whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); - - // Treasury Account - // TODO: this is manual for now, someday we might be able to use a - // macro for this particular key - let treasury_key = frame_system::Account::::hashed_key_for(Treasury::account_id()); - whitelist.push(treasury_key.to_vec().into()); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + // System BlockWeight + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96").to_vec().into(), + // Treasury Account + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -2235,50 +2162,14 @@ impl_runtime_apis! 
{ mod tests { use super::*; use frame_election_provider_support::NposSolution; - use frame_support::traits::WhitelistedStorageKeys; use frame_system::offchain::CreateSignedTransaction; - use sp_core::hexdisplay::HexDisplay; use sp_runtime::UpperOf; - use std::collections::HashSet; - - #[test] - fn check_whitelist() { - let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() - .iter() - .map(|e| HexDisplay::from(&e.key).to_string()) - .collect(); - - // Block Number - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac") - ); - // Total Issuance - assert!( - whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80") - ); - // Execution Phase - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a") - ); - // Event Count - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850") - ); - // System Events - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7") - ); - // System BlockWeight - assert!( - whitelist.contains("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96") - ); - } #[test] fn validate_transaction_submitter_bounds() { fn is_submit_signed_transaction() where - T: CreateSignedTransaction, + T: CreateSignedTransaction, { } @@ -2298,11 +2189,11 @@ mod tests { #[test] fn call_size() { - let size = core::mem::size_of::(); + let size = core::mem::size_of::(); assert!( size <= 208, - "size of RuntimeCall {} is more than 208 bytes: some calls have too big arguments, use Box to reduce the - size of RuntimeCall. + "size of Call {} is more than 208 bytes: some calls have too big arguments, use Box to reduce the + size of Call. 
If the limit is too strong, maybe consider increase the limit to 300.", size, ); diff --git a/bin/node/runtime/src/voter_bags.rs b/bin/node/runtime/src/voter_bags.rs index eb540c27abcc7..93790f028f457 100644 --- a/bin/node/runtime/src/voter_bags.rs +++ b/bin/node/runtime/src/voter_bags.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated bag thresholds. +//! Autogenerated voter bag thresholds. //! -//! Generated on 2022-08-15T19:26:59.939787+00:00 -//! Arguments -//! Total issuance: 100000000000000 -//! Minimum balance: 100000000000000 +//! Generated on 2021-07-05T09:17:40.469754927+00:00 //! for the node runtime. /// Existential weight for this runtime. 
diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 59f1fa94c9b20..18e979b95737f 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -35,8 +35,8 @@ use crate::{ use codec::{Decode, Encode}; use futures::executor; use kitchensink_runtime::{ - constants::currency::DOLLARS, AccountId, BalancesCall, CheckedExtrinsic, MinimumPeriod, - RuntimeCall, Signature, SystemCall, UncheckedExtrinsic, + constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod, + Signature, SystemCall, UncheckedExtrinsic, }; use node_primitives::Block; use sc_block_builder::BlockBuilderProvider; @@ -308,12 +308,12 @@ impl<'a> Iterator for BlockContentIterator<'a> { )), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => - RuntimeCall::Balances(BalancesCall::transfer_keep_alive { + Call::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), value: kitchensink_runtime::ExistentialDeposit::get() + 1, }), BlockType::RandomTransfersReaping => { - RuntimeCall::Balances(BalancesCall::transfer { + Call::Balances(BalancesCall::transfer { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
@@ -321,8 +321,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, - BlockType::Noop => - RuntimeCall::System(SystemCall::remark { remark: Vec::new() }), + BlockType::Noop => Call::System(SystemCall::remark { remark: Vec::new() }), }, }, self.runtime_version.spec_version, @@ -389,10 +388,11 @@ impl BenchDb { keyring: &BenchKeyring, ) -> (Client, std::sync::Arc, TaskExecutor) { let db_config = sc_client_db::DatabaseSettings { - trie_cache_maximum_size: Some(16 * 1024 * 1024), + state_cache_size: 16 * 1024 * 1024, + state_cache_child_ratio: Some((0, 100)), state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - blocks_pruning: sc_client_db::BlocksPruning::KeepAll, + keep_blocks: sc_client_db::KeepBlocks::All, }; let task_executor = TaskExecutor::new(); diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index dc53dc08ec6af..e3deb54ea8ac9 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } rand = "0.8" node-cli = { version = "3.0.0-dev", path = "../../node/cli" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index 7dfcaed773f40..641416154115b 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -37,51 +37,51 @@ use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. 
#[derive(Parser)] -#[command(rename_all = "kebab-case")] +#[clap(rename_all = "kebab-case")] enum ChainSpecBuilder { /// Create a new chain spec with the given authorities, endowed and sudo /// accounts. New { /// Authority key seed. - #[arg(long, short, required = true)] + #[clap(long, short, required = true)] authority_seeds: Vec, /// Active nominators (SS58 format), each backing a random subset of the aforementioned /// authorities. - #[arg(long, short, default_value = "0")] + #[clap(long, short, default_value = "0")] nominator_accounts: Vec, /// Endowed account address (SS58 format). - #[arg(long, short)] + #[clap(long, short)] endowed_accounts: Vec, /// Sudo account address (SS58 format). - #[arg(long, short)] + #[clap(long, short)] sudo_account: String, /// The path where the chain spec should be saved. - #[arg(long, short, default_value = "./chain_spec.json")] + #[clap(long, short, default_value = "./chain_spec.json")] chain_spec_path: PathBuf, }, /// Create a new chain spec with the given number of authorities and endowed /// accounts. Random keys will be generated as required. Generate { /// The number of authorities. - #[arg(long, short)] + #[clap(long, short)] authorities: usize, /// The number of nominators backing the aforementioned authorities. /// /// Will nominate a random subset of `authorities`. - #[arg(long, short, default_value_t = 0)] + #[clap(long, short, default_value = "0")] nominators: usize, /// The number of endowed accounts. - #[arg(long, short, default_value_t = 0)] + #[clap(long, short, default_value = "0")] endowed: usize, /// The path where the chain spec should be saved. - #[arg(long, short, default_value = "./chain_spec.json")] + #[clap(long, short, default_value = "./chain_spec.json")] chain_spec_path: PathBuf, /// Path to use when saving generated keystores for each authority. /// /// At this path, a new folder will be created for each authority's /// keystore named `auth-$i` where `i` is the authority index, i.e. 
/// `auth-0`, `auth-1`, etc. - #[arg(long, short)] + #[clap(long, short)] keystore_path: Option, }, } diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 77c323821a508..4c4e47e702be6 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -17,5 +17,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 280b90848fbf5..6773ba822340f 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -23,7 +23,7 @@ use sc_cli::{ }; #[derive(Debug, Parser)] -#[command( +#[clap( name = "subkey", author = "Parity Team ", about = "Utility for generating and restoring with Substrate keys", diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index e81d1b79e74ed..13dc6dca0dcd6 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -331,7 +331,7 @@ pub struct AllocationStats { /// The sum of every allocation ever made. /// /// This increases every time a new allocation is made. - pub bytes_allocated_sum: u128, + pub bytes_allocated_sum: u32, /// The amount of address space (in bytes) used by the allocator. 
/// @@ -435,7 +435,7 @@ impl FreeingBumpHeapAllocator { Header::Occupied(order).write_into(mem, header_ptr)?; self.stats.bytes_allocated += order.size() + HEADER_SIZE; - self.stats.bytes_allocated_sum += u128::from(order.size() + HEADER_SIZE); + self.stats.bytes_allocated_sum += order.size() + HEADER_SIZE; self.stats.bytes_allocated_peak = std::cmp::max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated); self.stats.address_space_used = self.bumper - self.original_heap_base; diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 8cb3ad565afb0..c8e9dc7482823 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -21,7 +21,7 @@ fnv = "1.0.6" futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } log = "0.4.17" -parking_lot = "0.12.1" +parking_lot = "0.12.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-executor = { version = "0.10.0-dev", path = "../executor" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 79cc0d7a16bcc..af8552886b72e 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -27,12 +27,12 @@ use sp_blockchain; use sp_consensus::BlockOrigin; use sp_core::offchain::OffchainStorage; use sp_runtime::{ + generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - backend::AsTrieBackend, ChildStorageCollection, IndexOperation, OffchainChangesCollection, - StorageCollection, + ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection, }; use sp_storage::{ChildInfo, StorageData, StorageKey}; use std::collections::{HashMap, HashSet}; @@ -215,13 +215,13 @@ pub trait BlockImportOperation { /// Mark a block as finalized. 
fn mark_finalized( &mut self, - hash: Block::Hash, + id: BlockId, justification: Option, ) -> sp_blockchain::Result<()>; /// Mark a block as new head. If both block import and set head are specified, set head /// overrides block import's best block rule. - fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()>; + fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; /// Add a transaction index operation. fn update_transaction_index(&mut self, index: Vec) @@ -251,7 +251,7 @@ pub trait Finalizer> { fn apply_finality( &self, operation: &mut ClientImportOperation, - block: Block::Hash, + id: BlockId, justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; @@ -271,7 +271,7 @@ pub trait Finalizer> { /// while performing major synchronization work. fn finalize_block( &self, - block: Block::Hash, + id: BlockId, justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; @@ -356,77 +356,77 @@ where /// Provides acess to storage primitives pub trait StorageProvider> { - /// Given a block's `Hash` and a key, return the value under the key in that block. + /// Given a `BlockId` and a key, return the value under the key in that block. fn storage( &self, - hash: Block::Hash, + id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a block's `Hash` and a key prefix, return the matching storage keys in that block. + /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. fn storage_keys( &self, - hash: Block::Hash, + id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a block's `Hash` and a key, return the value under the hash in that block. + /// Given a `BlockId` and a key, return the value under the hash in that block. fn storage_hash( &self, - hash: Block::Hash, + id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a block's `Hash` and a key prefix, return the matching child storage keys and values - /// in that block. 
+ /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in + /// that block. fn storage_pairs( &self, - hash: Block::Hash, + id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a block's `Hash` and a key prefix, return a `KeyIterator` iterates matching storage - /// keys in that block. + /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in + /// that block. fn storage_keys_iter<'a>( &self, - hash: Block::Hash, + id: &BlockId, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; - /// Given a block's `Hash`, a key and a child storage key, return the value under the key in - /// that block. + /// Given a `BlockId`, a key and a child storage key, return the value under the key in that + /// block. fn child_storage( &self, - hash: Block::Hash, + id: &BlockId, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a block's `Hash`, a key prefix, and a child storage key, return the matching child - /// storage keys. + /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage + /// keys. fn child_storage_keys( &self, - hash: Block::Hash, + id: &BlockId, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a block's `Hash` and a key `prefix` and a child storage key, + /// Given a `BlockId` and a key `prefix` and a child storage key, /// return a `KeyIterator` that iterates matching storage keys in that block. fn child_storage_keys_iter<'a>( &self, - hash: Block::Hash, + id: &BlockId, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; - /// Given a block's `Hash`, a key and a child storage key, return the hash under the key in that + /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that /// block. 
fn child_storage_hash( &self, - hash: Block::Hash, + id: &BlockId, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; @@ -448,12 +448,7 @@ pub trait Backend: AuxStore + Send + Sync { /// Associated blockchain backend type. type Blockchain: BlockchainBackend; /// Associated state backend type. - type State: StateBackend> - + Send - + AsTrieBackend< - HashFor, - TrieBackendStorage = >>::TrieBackendStorage, - >; + type State: StateBackend> + Send; /// Offchain workers local storage. type OffchainStorage: OffchainStorage; @@ -466,7 +461,7 @@ pub trait Backend: AuxStore + Send + Sync { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: Block::Hash, + block: BlockId, ) -> sp_blockchain::Result<()>; /// Commit block insertion. @@ -475,21 +470,21 @@ pub trait Backend: AuxStore + Send + Sync { transaction: Self::BlockImportOperation, ) -> sp_blockchain::Result<()>; - /// Finalize block with given `hash`. + /// Finalize block with given Id. /// /// This should only be called if the parent of the given block has been finalized. fn finalize_block( &self, - hash: Block::Hash, + block: BlockId, justification: Option, ) -> sp_blockchain::Result<()>; - /// Append justification to the block with the given `hash`. + /// Append justification to the block with the given Id. /// /// This should only be called for blocks that are already finalized. fn append_justification( &self, - hash: Block::Hash, + block: BlockId, justification: Justification, ) -> sp_blockchain::Result<()>; @@ -503,17 +498,16 @@ pub trait Backend: AuxStore + Send + Sync { fn offchain_storage(&self) -> Option; /// Returns true if state for given block is available. - fn have_state_at(&self, hash: Block::Hash, _number: NumberFor) -> bool { - self.state_at(hash).is_ok() + fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { + self.state_at(BlockId::Hash(*hash)).is_ok() } /// Returns state backend with post-state of given block. 
- fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result; + fn state_at(&self, block: BlockId) -> sp_blockchain::Result; /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an - /// inconsistent state. All blocks higher than the best block are also reverted and not counting - /// towards `n`. + /// inconsistent state. /// /// Returns the number of blocks that were successfully reverted and the list of finalized /// blocks that has been reverted. @@ -524,7 +518,7 @@ pub trait Backend: AuxStore + Send + Sync { ) -> sp_blockchain::Result<(NumberFor, HashSet)>; /// Discard non-best, unfinalized leaf block. - fn remove_leaf_block(&self, hash: Block::Hash) -> sp_blockchain::Result<()>; + fn remove_leaf_block(&self, hash: &Block::Hash) -> sp_blockchain::Result<()>; /// Insert auxiliary data into key-value store. fn insert_aux< diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 949fd16a30704..738f932a47bf0 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -18,11 +18,13 @@ //! A method call executor interface. +use codec::{Decode, Encode}; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; +use sp_core::NativeOrEncoded; use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ExecutionManager, ExecutionStrategy, OverlayedChanges, StorageProof}; -use std::cell::RefCell; +use std::{cell::RefCell, panic::UnwindSafe, result}; use crate::execution_extensions::ExecutionExtensions; use sp_api::{ProofRecorder, StorageTransactionCache}; @@ -66,9 +68,11 @@ pub trait CallExecutor: RuntimeVersionOf { /// of the execution context. 
fn contextual_call< EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, at: &BlockId, @@ -81,9 +85,10 @@ pub trait CallExecutor: RuntimeVersionOf { >, >, execution_manager: ExecutionManager, + native_call: Option, proof_recorder: &Option>, extensions: Option, - ) -> sp_blockchain::Result> + ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/api/src/client.rs b/client/api/src/client.rs index bb88853d23afb..b809e0ee61032 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -110,7 +110,7 @@ pub trait BlockBackend { /// Get block body by ID. Returns `None` if the body is not stored. fn block_body( &self, - hash: Block::Hash, + id: &BlockId, ) -> sp_blockchain::Result::Extrinsic>>>; /// Get all indexed transactions for a block, @@ -118,7 +118,10 @@ pub trait BlockBackend { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>>; + fn block_indexed_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result>>>; /// Get full block by id. fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; @@ -128,7 +131,7 @@ pub trait BlockBackend { -> sp_blockchain::Result; /// Get block justifications for the block with the given id. - fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result>; + fn justifications(&self, id: &BlockId) -> sp_blockchain::Result>; /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; @@ -137,10 +140,10 @@ pub trait BlockBackend { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. 
- fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>>; + fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>>; /// Check if transaction index exists. - fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { + fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { Ok(self.indexed_transaction(hash)?.is_some()) } diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 07a483bc3eaf2..574687312c82b 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -201,11 +201,11 @@ impl ExecutionExtensions { /// /// Based on the execution context and capabilities it produces /// the right manager and extensions object to support desired set of APIs. - pub fn manager_and_extensions( + pub fn manager_and_extensions( &self, at: &BlockId, context: ExecutionContext, - ) -> (ExecutionManager>, Extensions) { + ) -> (ExecutionManager>, Extensions) { let manager = match context { ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), ExecutionContext::Syncing => self.strategies.syncing.get_manager(), diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 5a3e25ab5987b..a8a7442a8ef9f 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -173,7 +173,7 @@ impl Blockchain { { let mut storage = self.storage.write(); - storage.leaves.import(hash, number, *header.parent_hash()); + storage.leaves.import(hash, number, header.parent_hash().clone()); storage.blocks.insert(hash, StoredBlock::new(header, body, justifications)); if let NewBlockState::Final = new_state { @@ -223,10 +223,10 @@ impl Blockchain { } /// Set an existing block as head. - pub fn set_head(&self, hash: Block::Hash) -> sp_blockchain::Result<()> { + pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { let header = self - .header(BlockId::Hash(hash))? 
- .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))?; + .header(id)? + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))?; self.apply_head(&header) } @@ -271,16 +271,21 @@ impl Blockchain { fn finalize_header( &self, - block: Block::Hash, + id: BlockId, justification: Option, ) -> sp_blockchain::Result<()> { + let hash = match self.header(id)? { + Some(h) => h.hash(), + None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), + }; + let mut storage = self.storage.write(); - storage.finalized_hash = block; + storage.finalized_hash = hash; if justification.is_some() { let block = storage .blocks - .get_mut(&block) + .get_mut(&hash) .expect("hash was fetched from a block in the db; qed"); let block_justifications = match block { @@ -295,9 +300,10 @@ impl Blockchain { fn append_justification( &self, - hash: Block::Hash, + id: BlockId, justification: Justification, ) -> sp_blockchain::Result<()> { + let hash = self.expect_block_hash_from_id(&id)?; let mut storage = self.storage.write(); let block = storage @@ -405,18 +411,21 @@ impl HeaderMetadata for Blockchain { impl blockchain::Backend for Blockchain { fn body( &self, - hash: Block::Hash, + id: BlockId, ) -> sp_blockchain::Result::Extrinsic>>> { - Ok(self - .storage - .read() - .blocks - .get(&hash) - .and_then(|b| b.extrinsics().map(|x| x.to_vec()))) + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.extrinsics().map(|x| x.to_vec())) + })) } - fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { - Ok(self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned())) + fn justifications(&self, id: BlockId) -> sp_blockchain::Result> { + Ok(self.id(id).and_then(|hash| { + self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned()) + })) } fn last_finalized(&self) -> sp_blockchain::Result { @@ -445,13 +454,13 @@ impl blockchain::Backend for Blockchain { 
unimplemented!() } - fn indexed_transaction(&self, _hash: Block::Hash) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, _hash: &Block::Hash) -> sp_blockchain::Result>> { unimplemented!("Not supported by the in-mem backend.") } fn block_indexed_body( &self, - _hash: Block::Hash, + _id: BlockId, ) -> sp_blockchain::Result>>> { unimplemented!("Not supported by the in-mem backend.") } @@ -491,8 +500,8 @@ pub struct BlockImportOperation { new_state: Option<> as StateBackend>>::Transaction>, aux: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(Block::Hash, Option)>, - set_head: Option, + finalized_blocks: Vec<(BlockId, Option)>, + set_head: Option>, } impl BlockImportOperation @@ -507,7 +516,7 @@ where ) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let child_delta = storage.children_default.values().map(|child_content| { + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), @@ -596,16 +605,16 @@ where fn mark_finalized( &mut self, - hash: Block::Hash, + block: BlockId, justification: Option, ) -> sp_blockchain::Result<()> { - self.finalized_blocks.push((hash, justification)); + self.finalized_blocks.push((block, justification)); Ok(()) } - fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()> { + fn mark_head(&mut self, block: BlockId) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one set block per operation is allowed"); - self.set_head = Some(hash); + self.set_head = Some(block); Ok(()) } @@ -677,7 +686,7 @@ where type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { - let old_state = self.state_at(Default::default())?; + let old_state = self.state_at(BlockId::Hash(Default::default()))?; Ok(BlockImportOperation { pending_block: None, old_state, @@ -691,7 +700,7 @@ where fn begin_state_operation( &self, operation: &mut 
Self::BlockImportOperation, - block: Block::Hash, + block: BlockId, ) -> sp_blockchain::Result<()> { operation.old_state = self.state_at(block)?; Ok(()) @@ -733,18 +742,18 @@ where fn finalize_block( &self, - hash: Block::Hash, + block: BlockId, justification: Option, ) -> sp_blockchain::Result<()> { - self.blockchain.finalize_header(hash, justification) + self.blockchain.finalize_header(block, justification) } fn append_justification( &self, - hash: Block::Hash, + block: BlockId, justification: Justification, ) -> sp_blockchain::Result<()> { - self.blockchain.append_justification(hash, justification) + self.blockchain.append_justification(block, justification) } fn blockchain(&self) -> &Self::Blockchain { @@ -759,16 +768,16 @@ where None } - fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { - if hash == Default::default() { - return Ok(Self::State::default()) + fn state_at(&self, block: BlockId) -> sp_blockchain::Result { + match block { + BlockId::Hash(h) if h == Default::default() => return Ok(Self::State::default()), + _ => {}, } - self.states - .read() - .get(&hash) - .cloned() - .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash))) + self.blockchain + .id(block) + .and_then(|id| self.states.read().get(&id).cloned()) + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", block))) } fn revert( @@ -779,7 +788,7 @@ where Ok((Zero::zero(), HashSet::new())) } - fn remove_leaf_block(&self, _hash: Block::Hash) -> sp_blockchain::Result<()> { + fn remove_leaf_block(&self, _hash: &Block::Hash) -> sp_blockchain::Result<()> { Ok(()) } @@ -814,7 +823,7 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { #[cfg(test)] mod tests { use crate::{in_mem::Blockchain, NewBlockState}; - use sp_api::HeaderT; + use sp_api::{BlockId, HeaderT}; use sp_blockchain::Backend; use sp_runtime::{ConsensusEngineId, Justifications}; use substrate_test_runtime::{Block, Header, H256}; @@ -861,24 +870,26 @@ mod tests { fn 
append_and_retrieve_justifications() { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); + let block = BlockId::Hash(last_finalized); - blockchain.append_justification(last_finalized, (ID2, vec![4])).unwrap(); + blockchain.append_justification(block, (ID2, vec![4])).unwrap(); let justifications = { let mut just = Justifications::from((ID1, vec![3])); just.append((ID2, vec![4])); just }; - assert_eq!(blockchain.justifications(last_finalized).unwrap(), Some(justifications)); + assert_eq!(blockchain.justifications(block).unwrap(), Some(justifications)); } #[test] fn store_duplicate_justifications_is_forbidden() { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); + let block = BlockId::Hash(last_finalized); - blockchain.append_justification(last_finalized, (ID2, vec![0])).unwrap(); + blockchain.append_justification(block, (ID2, vec![0])).unwrap(); assert!(matches!( - blockchain.append_justification(last_finalized, (ID2, vec![1])), + blockchain.append_justification(block, (ID2, vec![1])), Err(sp_blockchain::Error::BadJustification(_)), )); } diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index cdcb80a110b74..2e5d4be3a5462 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -32,36 +32,32 @@ struct LeafSetItem { number: Reverse, } -/// Inserted and removed leaves after an import action. -pub struct ImportOutcome { - inserted: LeafSetItem, - removed: Option, +/// A displaced leaf after import. +#[must_use = "Displaced items from the leaf set must be handled."] +pub struct ImportDisplaced { + new_hash: H, + displaced: LeafSetItem, } -/// Inserted and removed leaves after a remove action. -pub struct RemoveOutcome { - inserted: Option, - removed: LeafSetItem, +/// Displaced leaves after finalization. 
+#[must_use = "Displaced items from the leaf set must be handled."] +pub struct FinalizationDisplaced { + leaves: BTreeMap, Vec>, } -/// Removed leaves after a finalization action. -pub struct FinalizationOutcome { - removed: BTreeMap, Vec>, -} - -impl FinalizationOutcome { +impl FinalizationDisplaced { /// Merge with another. This should only be used for displaced items that /// are produced within one transaction of each other. pub fn merge(&mut self, mut other: Self) { // this will ignore keys that are in duplicate, however // if these are actually produced correctly via the leaf-set within // one transaction, then there will be no overlap in the keys. - self.removed.append(&mut other.removed); + self.leaves.append(&mut other.leaves); } /// Iterate over all displaced leaves. pub fn leaves(&self) -> impl Iterator { - self.removed.values().flatten() + self.leaves.values().flatten() } } @@ -103,52 +99,27 @@ where } /// Update the leaf list on import. - pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> ImportOutcome { - let number = Reverse(number); - - let removed = if number.0 != N::zero() { - let parent_number = Reverse(number.0.clone() - N::one()); - self.remove_leaf(&parent_number, &parent_hash).then(|| parent_hash) - } else { - None - }; - - self.insert_leaf(number.clone(), hash.clone()); - - ImportOutcome { inserted: LeafSetItem { hash, number }, removed } - } - - /// Update the leaf list on removal. - /// - /// Note that the leaves set structure doesn't have the information to decide if the - /// leaf we're removing is the last children of the parent. Follows that this method requires - /// the caller to check this condition and optionally pass the `parent_hash` if `hash` is - /// its last child. - /// - /// Returns `None` if no modifications are applied. 
- pub fn remove( - &mut self, - hash: H, - number: N, - parent_hash: Option, - ) -> Option> { - let number = Reverse(number); - - if !self.remove_leaf(&number, &hash) { - return None - } - - let inserted = parent_hash.and_then(|parent_hash| { - if number.0 != N::zero() { - let parent_number = Reverse(number.0.clone() - N::one()); - self.insert_leaf(parent_number, parent_hash.clone()); - Some(parent_hash) + /// Returns a displaced leaf if there was one. + pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { + // avoid underflow for genesis. + let displaced = if number != N::zero() { + let parent_number = Reverse(number.clone() - N::one()); + let was_displaced = self.remove_leaf(&parent_number, &parent_hash); + + if was_displaced { + Some(ImportDisplaced { + new_hash: hash.clone(), + displaced: LeafSetItem { hash: parent_hash, number: parent_number }, + }) } else { None } - }); + } else { + None + }; - Some(RemoveOutcome { inserted, removed: LeafSetItem { hash, number } }) + self.insert_leaf(Reverse(number.clone()), hash.clone()); + displaced } /// Note a block height finalized, displacing all leaves with number less than the finalized @@ -158,15 +129,15 @@ where /// same number as the finalized block, but with different hashes, the current behavior /// is simpler and our assumptions about how finalization works means that those leaves /// will be pruned soon afterwards anyway. 
- pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { + pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } + return FinalizationDisplaced { leaves: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.split_off(&Reverse(boundary)); - FinalizationOutcome { removed: below_boundary } + FinalizationDisplaced { leaves: below_boundary } } /// The same as [`Self::finalize_height`], but it only simulates the operation. @@ -174,16 +145,16 @@ where /// This means that no changes are done. /// /// Returns the leaves that would be displaced by finalizing the given block. - pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { + pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationDisplaced { let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } + return FinalizationDisplaced { leaves: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.range(&Reverse(boundary)..); - FinalizationOutcome { - removed: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), + FinalizationDisplaced { + leaves: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), } } @@ -288,11 +259,6 @@ where removed } - - /// Returns the highest leaf and all hashes associated to it. - pub fn highest_leaf(&self) -> Option<(N, &[H])> { - self.storage.iter().next().map(|(k, v)| (k.0.clone(), &v[..])) - } } /// Helper for undoing operations. @@ -305,30 +271,16 @@ where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { - /// Undo an imported block by providing the import operation outcome. - /// No additional operations should be performed between import and undo. 
- pub fn undo_import(&mut self, outcome: ImportOutcome) { - if let Some(removed_hash) = outcome.removed { - let removed_number = Reverse(outcome.inserted.number.0.clone() - N::one()); - self.inner.insert_leaf(removed_number, removed_hash); - } - self.inner.remove_leaf(&outcome.inserted.number, &outcome.inserted.hash); - } - - /// Undo a removed block by providing the displaced leaf. - /// No additional operations should be performed between remove and undo. - pub fn undo_remove(&mut self, outcome: RemoveOutcome) { - if let Some(inserted_hash) = outcome.inserted { - let inserted_number = Reverse(outcome.removed.number.0.clone() - N::one()); - self.inner.remove_leaf(&inserted_number, &inserted_hash); - } - self.inner.insert_leaf(outcome.removed.number, outcome.removed.hash); + /// Undo an imported block by providing the displaced leaf. + pub fn undo_import(&mut self, displaced: ImportDisplaced) { + let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); + self.inner.remove_leaf(&new_number, &displaced.new_hash); + self.inner.insert_leaf(displaced.displaced.number, displaced.displaced.hash); } /// Undo a finalization operation by providing the displaced leaves. - /// No additional operations should be performed between finalization and undo. 
- pub fn undo_finalization(&mut self, mut outcome: FinalizationOutcome) { - self.inner.storage.append(&mut outcome.removed); + pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { + self.inner.storage.append(&mut displaced.leaves); } } @@ -338,7 +290,7 @@ mod tests { use std::sync::Arc; #[test] - fn import_works() { + fn it_works() { let mut set = LeafSet::new(); set.import(0u32, 0u32, 0u32); @@ -360,90 +312,6 @@ mod tests { assert!(set.contains(3, 3_1)); assert!(set.contains(2, 2_2)); assert!(set.contains(2, 2_3)); - - // Finally test the undo feature - - let outcome = set.import(2_4, 2, 1_1); - assert_eq!(outcome.inserted.hash, 2_4); - assert_eq!(outcome.removed, None); - assert_eq!(set.count(), 4); - assert!(set.contains(2, 2_4)); - - set.undo().undo_import(outcome); - assert_eq!(set.count(), 3); - assert!(set.contains(3, 3_1)); - assert!(set.contains(2, 2_2)); - assert!(set.contains(2, 2_3)); - - let outcome = set.import(3_2, 3, 2_3); - assert_eq!(outcome.inserted.hash, 3_2); - assert_eq!(outcome.removed, Some(2_3)); - assert_eq!(set.count(), 3); - assert!(set.contains(3, 3_2)); - - set.undo().undo_import(outcome); - assert_eq!(set.count(), 3); - assert!(set.contains(3, 3_1)); - assert!(set.contains(2, 2_2)); - assert!(set.contains(2, 2_3)); - } - - #[test] - fn removal_works() { - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_1); - set.import(11_2, 11, 10_1); - set.import(12_1, 12, 11_1); - - let outcome = set.remove(12_1, 12, Some(11_1)).unwrap(); - assert_eq!(outcome.removed.hash, 12_1); - assert_eq!(outcome.inserted, Some(11_1)); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_1)); - assert!(set.contains(11, 11_2)); - - let outcome = set.remove(11_1, 11, None).unwrap(); - assert_eq!(outcome.removed.hash, 11_1); - assert_eq!(outcome.inserted, None); - assert_eq!(set.count(), 1); - assert!(set.contains(11, 11_2)); - - let outcome = set.remove(11_2, 11, Some(10_1)).unwrap(); - 
assert_eq!(outcome.removed.hash, 11_2); - assert_eq!(outcome.inserted, Some(10_1)); - assert_eq!(set.count(), 1); - assert!(set.contains(10, 10_1)); - - set.undo().undo_remove(outcome); - assert_eq!(set.count(), 1); - assert!(set.contains(11, 11_2)); - } - - #[test] - fn finalization_works() { - let mut set = LeafSet::new(); - set.import(9_1u32, 9u32, 0u32); - set.import(10_1, 10, 9_1); - set.import(10_2, 10, 9_1); - set.import(11_1, 11, 10_1); - set.import(11_2, 11, 10_1); - set.import(12_1, 12, 11_2); - - let outcome = set.finalize_height(11); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert_eq!( - outcome.removed, - [(Reverse(10), vec![10_2])].into_iter().collect::>(), - ); - - set.undo().undo_finalization(outcome); - assert_eq!(set.count(), 3); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert!(set.contains(10, 10_2)); } #[test] @@ -510,4 +378,44 @@ mod tests { let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); assert_eq!(set, set2); } + + #[test] + fn undo_import() { + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + + let displaced = set.import(12_1, 12, 11_1).unwrap(); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_2)); + assert!(set.contains(12, 12_1)); + + set.undo().undo_import(displaced); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(11, 11_2)); + } + + #[test] + fn undo_finalization() { + let mut set = LeafSet::new(); + set.import(9_1u32, 9u32, 0u32); + set.import(10_1, 10, 9_1); + set.import(10_2, 10, 9_1); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + set.import(12_1, 12, 11_2); + + let displaced = set.finalize_height(11); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + + set.undo().undo_finalization(displaced); + assert_eq!(set.count(), 3); + 
assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + assert!(set.contains(10, 10_2)); + } } diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 01e35df1dec1c..3aad4af1befb5 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -18,7 +18,7 @@ //! Proof utilities use crate::{CompactProof, StorageProof}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{KeyValueStates, KeyValueStorageLevel}; use sp_storage::ChildInfo; @@ -27,7 +27,7 @@ pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. fn read_proof( &self, - hash: Block::Hash, + id: &BlockId, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -35,7 +35,7 @@ pub trait ProofProvider { /// read proof. fn read_child_proof( &self, - hash: Block::Hash, + id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -46,12 +46,12 @@ pub trait ProofProvider { /// No changes are made. fn execution_proof( &self, - hash: Block::Hash, + id: &BlockId, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)>; - /// Given a `Hash` iterate over all storage values starting at `start_keys`. + /// Given a `BlockId` iterate over all storage values starting at `start_keys`. /// Last `start_keys` element contains last accessed key value. /// With multiple `start_keys`, first `start_keys` element is /// the current storage key of of the last accessed child trie. @@ -61,12 +61,12 @@ pub trait ProofProvider { /// Returns combined proof and the numbers of collected keys. fn read_proof_collection( &self, - hash: Block::Hash, + id: &BlockId, start_keys: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)>; - /// Given a `Hash` iterate over all storage values starting at `start_key`. 
+ /// Given a `BlockId` iterate over all storage values starting at `start_key`. /// Returns collected keys and values. /// Returns the collected keys values content of the top trie followed by the /// collected keys values of child tries. @@ -76,7 +76,7 @@ pub trait ProofProvider { /// end. fn storage_collection( &self, - hash: Block::Hash, + id: &BlockId, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result>; diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 91c977d90a660..012e6096aab80 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -14,28 +14,28 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.11" +prost-build = "0.10" [dependencies] +async-trait = "0.1" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" -libp2p = { version = "0.49.0", default-features = false, features = ["kad"] } +libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } log = "0.4.17" -prost = "0.11" +prost = "0.10" rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-network = { version = "0.10.0-dev", path = "../network" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-keystore = { version = "0.12.0", path = "../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } 
-async-trait = "0.1.56" [dev-dependencies] quickcheck = { version = "1.0.3", default-features = false } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index 285a2714b81f5..bce39069ef7c7 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -57,7 +57,7 @@ pub enum Error { ParsingMultiaddress(#[from] libp2p::core::multiaddr::Error), #[error("Failed to parse a libp2p key.")] - ParsingLibp2pIdentity(#[from] libp2p::identity::error::DecodingError), + ParsingLibp2pIdentity(#[from] sc_network::DecodingError), #[error("Failed to sign using a specific public key.")] MissingSignature(CryptoTypePublicPair), diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index db3802b168fe5..8522da9984a6f 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -29,7 +29,7 @@ pub use crate::{ service::Service, - worker::{AuthorityDiscovery, NetworkProvider, Role, Worker}, + worker::{NetworkProvider, Role, Worker}, }; use std::{collections::HashSet, sync::Arc, time::Duration}; @@ -39,10 +39,10 @@ use futures::{ Stream, }; -use libp2p::{Multiaddr, PeerId}; -use sc_network_common::protocol::event::DhtEvent; -use sp_authority_discovery::AuthorityId; -use sp_blockchain::HeaderBackend; +use sc_client_api::blockchain::HeaderBackend; +use sc_network::{DhtEvent, Multiaddr, PeerId}; +use sp_api::ProvideRuntimeApi; +use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; use sp_runtime::traits::Block as BlockT; mod error; @@ -121,7 +121,8 @@ pub fn new_worker_and_service( where Block: BlockT + Unpin + 'static, Network: NetworkProvider, - Client: AuthorityDiscovery + Send + Sync + 'static + HeaderBackend, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { new_worker_and_service_with_config( @@ -148,7 +149,8 @@ pub fn 
new_worker_and_service_with_config + HeaderBackend + 'static, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { let (to_worker, from_service) = mpsc::channel(0); diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index df09b6ea43216..c240e5d0c2287 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -25,7 +25,7 @@ use futures::{ SinkExt, }; -use libp2p::{Multiaddr, PeerId}; +use sc_network::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; /// Service to interact with the [`crate::Worker`]. diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 208440b7ab1ea..e9f94b6a186db 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -87,11 +87,14 @@ fn get_addresses_and_authority_id() { fn cryptos_are_compatible() { use sp_core::crypto::Pair; - let libp2p_secret = libp2p::identity::Keypair::generate_ed25519(); + let libp2p_secret = sc_network::Keypair::generate_ed25519(); let libp2p_public = libp2p_secret.public(); let sp_core_secret = { - let libp2p::identity::Keypair::Ed25519(libp2p_ed_secret) = libp2p_secret.clone(); + let libp2p_ed_secret = match libp2p_secret.clone() { + sc_network::Keypair::Ed25519(x) => x, + _ => panic!("generate_ed25519 should have generated an Ed25519 key ¯\\_(ツ)_/¯"), + }; sp_core::ed25519::Pair::from_seed_slice(&libp2p_ed_secret.secret().as_ref()).unwrap() }; let sp_core_public = sp_core_secret.public(); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 4121b64e00b9b..87cc72ba7a69c 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -32,27 +32,23 @@ use std::{ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use 
addr_cache::AddrCache; +use async_trait::async_trait; use codec::Decode; use ip_network::IpNetwork; use libp2p::{ core::multiaddr, multihash::{Multihash, MultihashDigest}, - Multiaddr, PeerId, }; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; -use sc_network_common::{ - protocol::event::DhtEvent, - service::{KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature}, -}; -use sp_api::{ApiError, ProvideRuntimeApi}; +use sc_client_api::blockchain::HeaderBackend; +use sc_network::{DhtEvent, ExHashT, Multiaddr, NetworkStateInfo, PeerId}; +use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, }; -use sp_blockchain::HeaderBackend; - use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; use sp_keystore::CryptoStore; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; @@ -140,7 +136,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, /// Set of in-flight lookups. - in_flight_lookups: HashMap, + in_flight_lookups: HashMap, addr_cache: addr_cache::AddrCache, @@ -151,35 +147,12 @@ pub struct Worker { phantom: PhantomData, } -/// Wrapper for [`AuthorityDiscoveryApi`](sp_authority_discovery::AuthorityDiscoveryApi). Can be -/// be implemented by any struct without dependency on the runtime. -#[async_trait::async_trait] -pub trait AuthorityDiscovery { - /// Retrieve authority identifiers of the current and next authority set. 
- async fn authorities(&self, at: Block::Hash) - -> std::result::Result, ApiError>; -} - -#[async_trait::async_trait] -impl AuthorityDiscovery for T -where - T: ProvideRuntimeApi + Send + Sync, - T::Api: AuthorityDiscoveryApi, - Block: BlockT, -{ - async fn authorities( - &self, - at: Block::Hash, - ) -> std::result::Result, ApiError> { - self.runtime_api().authorities(&BlockId::Hash(at)) - } -} - impl Worker where Block: BlockT + Unpin + 'static, Network: NetworkProvider, - Client: AuthorityDiscovery + HeaderBackend + 'static, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { /// Construct a [`Worker`]. @@ -378,7 +351,7 @@ where } async fn refill_pending_lookups_queue(&mut self) -> Result<()> { - let best_hash = self.client.info().best_hash; + let id = BlockId::hash(self.client.info().best_hash); let local_keys = match &self.role { Role::PublishAndDiscover(key_store) => key_store @@ -391,8 +364,8 @@ where let mut authorities = self .client - .authorities(best_hash) - .await + .runtime_api() + .authorities(&id) .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .filter(|id| !local_keys.contains(id.as_ref())) @@ -491,7 +464,10 @@ where } } - fn handle_dht_value_found_event(&mut self, values: Vec<(KademliaKey, Vec)>) -> Result<()> { + fn handle_dht_value_found_event( + &mut self, + values: Vec<(sc_network::KademliaKey, Vec)>, + ) -> Result<()> { // Ensure `values` is not empty and all its keys equal. let remote_key = single(values.iter().map(|(key, _)| key.clone())) .map_err(|_| Error::ReceivingDhtValueFoundEventWithDifferentKeys)? 
@@ -547,11 +523,11 @@ where // properly signed by the owner of the PeerId if let Some(peer_signature) = peer_signature { - let public_key = libp2p::identity::PublicKey::from_protobuf_encoding( - &peer_signature.public_key, - ) - .map_err(Error::ParsingLibp2pIdentity)?; - let signature = Signature { public_key, bytes: peer_signature.signature }; + let public_key = + sc_network::PublicKey::from_protobuf_encoding(&peer_signature.public_key) + .map_err(Error::ParsingLibp2pIdentity)?; + let signature = + sc_network::Signature { public_key, bytes: peer_signature.signature }; if !signature.verify(record, &remote_peer_id) { return Err(Error::VerifyingDhtPayload) @@ -598,10 +574,10 @@ where .into_iter() .collect::>(); - let best_hash = client.info().best_hash; + let id = BlockId::hash(client.info().best_hash); let authorities = client - .authorities(best_hash) - .await + .runtime_api() + .authorities(&id) .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .map(Into::into) @@ -614,15 +590,55 @@ where } } +pub trait NetworkSigner { + /// Sign a message in the name of `self.local_peer_id()` + fn sign_with_local_identity( + &self, + msg: impl AsRef<[u8]>, + ) -> std::result::Result; +} + /// NetworkProvider provides [`Worker`] with all necessary hooks into the /// underlying Substrate networking. Using this trait abstraction instead of -/// `sc_network::NetworkService` directly is necessary to unit test [`Worker`]. -pub trait NetworkProvider: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} +/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. +#[async_trait] +pub trait NetworkProvider: NetworkStateInfo + NetworkSigner { + /// Start putting a value in the Dht. + fn put_value(&self, key: sc_network::KademliaKey, value: Vec); + + /// Start getting a value from the Dht. 
+ fn get_value(&self, key: &sc_network::KademliaKey); +} -impl NetworkProvider for T where T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} +impl NetworkSigner for sc_network::NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn sign_with_local_identity( + &self, + msg: impl AsRef<[u8]>, + ) -> std::result::Result { + self.sign_with_local_identity(msg) + } +} + +#[async_trait::async_trait] +impl NetworkProvider for sc_network::NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { + self.put_value(key, value) + } + fn get_value(&self, key: &sc_network::KademliaKey) { + self.get_value(key) + } +} -fn hash_authority_id(id: &[u8]) -> KademliaKey { - KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) +fn hash_authority_id(id: &[u8]) -> sc_network::KademliaKey { + sc_network::KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) } // Makes sure all values are the same and returns it @@ -669,7 +685,7 @@ async fn sign_record_with_authority_ids( peer_signature: Option, key_store: &dyn CryptoStore, keys: Vec, -) -> Result)>> { +) -> Result)>> { let signatures = key_store .sign_with_all(key_types::AUTHORITY_DISCOVERY, keys.clone(), &serialized_record) .await diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 19bbbf0b62e7e..f768b9c4e66a7 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -16,10 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use libp2p::{ - core::multiaddr::{Multiaddr, Protocol}, - PeerId, -}; +use libp2p::core::multiaddr::{Multiaddr, Protocol}; + +use sc_network::PeerId; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; diff --git a/client/authority-discovery/src/worker/schema/tests.rs b/client/authority-discovery/src/worker/schema/tests.rs index 60147d6762e50..b85a4ce37447d 100644 --- a/client/authority-discovery/src/worker/schema/tests.rs +++ b/client/authority-discovery/src/worker/schema/tests.rs @@ -21,8 +21,9 @@ mod schema_v1 { } use super::*; -use libp2p::{multiaddr::Multiaddr, PeerId}; +use libp2p::multiaddr::Multiaddr; use prost::Message; +use sc_network::PeerId; #[test] fn v2_decodes_v1() { @@ -55,7 +56,7 @@ fn v2_decodes_v1() { #[test] fn v1_decodes_v2() { - let peer_secret = libp2p::identity::Keypair::generate_ed25519(); + let peer_secret = sc_network::Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 8e7a221877574..a1a699bc30dd2 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -22,6 +22,7 @@ use std::{ task::Poll, }; +use async_trait::async_trait; use futures::{ channel::mpsc::{self, channel}, executor::{block_on, LocalPool}, @@ -29,11 +30,9 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{core::multiaddr, identity::Keypair, PeerId}; +use libp2p::{core::multiaddr, PeerId}; use prometheus_endpoint::prometheus::default_registry; -use sc_client_api::HeaderBackend; -use sc_network_common::service::{KademliaKey, Signature, SigningError}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::KeyStore, CryptoStore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; @@ -49,7 +48,7 @@ pub(crate) struct TestApi { impl 
ProvideRuntimeApi for TestApi { type Api = RuntimeApi; - fn runtime_api(&self) -> ApiRef<'_, Self::Api> { + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { RuntimeApi { authorities: self.authorities.clone() }.into() } } @@ -112,18 +111,18 @@ sp_api::mock_impl_runtime_apis! { #[derive(Debug)] pub enum TestNetworkEvent { - GetCalled(KademliaKey), - PutCalled(KademliaKey, Vec), + GetCalled(sc_network::KademliaKey), + PutCalled(sc_network::KademliaKey, Vec), } pub struct TestNetwork { peer_id: PeerId, - identity: Keypair, + identity: sc_network::Keypair, external_addresses: Vec, // Whenever functions on `TestNetwork` are called, the function arguments are added to the // vectors below. - pub put_value_call: Arc)>>>, - pub get_value_call: Arc>>, + pub put_value_call: Arc)>>>, + pub get_value_call: Arc>>, event_sender: mpsc::UnboundedSender, event_receiver: Option>, } @@ -137,7 +136,7 @@ impl TestNetwork { impl Default for TestNetwork { fn default() -> Self { let (tx, rx) = mpsc::unbounded(); - let identity = Keypair::generate_ed25519(); + let identity = sc_network::Keypair::generate_ed25519(); TestNetwork { peer_id: identity.public().to_peer_id(), identity, @@ -154,20 +153,21 @@ impl NetworkSigner for TestNetwork { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - Signature::sign_message(msg, &self.identity) + ) -> std::result::Result { + sc_network::Signature::sign_message(msg, &self.identity) } } -impl NetworkDHTProvider for TestNetwork { - fn put_value(&self, key: KademliaKey, value: Vec) { +#[async_trait] +impl NetworkProvider for TestNetwork { + fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); self.event_sender .clone() .unbounded_send(TestNetworkEvent::PutCalled(key, value)) .unwrap(); } - fn get_value(&self, key: &KademliaKey) { + fn get_value(&self, key: &sc_network::KademliaKey) { 
self.get_value_call.lock().unwrap().push(key.clone()); self.event_sender .clone() @@ -186,16 +186,12 @@ impl NetworkStateInfo for TestNetwork { } } -struct TestSigner<'a> { - keypair: &'a Keypair, -} - -impl<'a> NetworkSigner for TestSigner<'a> { +impl NetworkSigner for sc_network::Keypair { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - Signature::sign_message(msg, self.keypair) + ) -> std::result::Result { + sc_network::Signature::sign_message(msg, self) } } @@ -204,7 +200,7 @@ async fn build_dht_event( public_key: AuthorityId, key_store: &dyn CryptoStore, network: Option<&Signer>, -) -> Vec<(KademliaKey, Vec)> { +) -> Vec<(sc_network::KademliaKey, Vec)> { let serialized_record = serialize_authority_record(serialize_addresses(addresses.into_iter())).unwrap(); @@ -317,7 +313,7 @@ fn publish_discover_cycle() { let dht_event = { let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - DhtEvent::ValueFound(vec![(key, value)]) + sc_network::DhtEvent::ValueFound(vec![(key, value)]) }; // Node B discovering node A's address. 
@@ -473,7 +469,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { None, ) .await; - DhtEvent::ValueFound(kv_pairs) + sc_network::DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -491,7 +487,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { struct DhtValueFoundTester { pub remote_key_store: KeyStore, pub remote_authority_public: sp_core::sr25519::Public, - pub remote_node_key: Keypair, + pub remote_node_key: sc_network::Keypair, pub local_worker: Option< Worker< TestApi, @@ -500,7 +496,7 @@ struct DhtValueFoundTester { sp_runtime::generic::Header, substrate_test_runtime_client::runtime::Extrinsic, >, - std::pin::Pin>>, + std::pin::Pin>>, >, >, } @@ -512,7 +508,7 @@ impl DhtValueFoundTester { block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) .unwrap(); - let remote_node_key = Keypair::generate_ed25519(); + let remote_node_key = sc_network::Keypair::generate_ed25519(); Self { remote_key_store, remote_authority_public, remote_node_key, local_worker: None } } @@ -527,11 +523,11 @@ impl DhtValueFoundTester { fn process_value_found( &mut self, strict_record_validation: bool, - values: Vec<(KademliaKey, Vec)>, + values: Vec<(sc_network::KademliaKey, Vec)>, ) -> Option<&HashSet> { let (_dht_event_tx, dht_event_rx) = channel(1); let local_test_api = - Arc::new(TestApi { authorities: vec![self.remote_authority_public.into()] }); + Arc::new(TestApi { authorities: vec![self.remote_authority_public.clone().into()] }); let local_network: Arc = Arc::new(Default::default()); let local_key_store = KeyStore::new(); @@ -556,7 +552,8 @@ impl DhtValueFoundTester { self.local_worker .as_ref() .map(|w| { - w.addr_cache.get_addresses_by_authority_id(&self.remote_authority_public.into()) + w.addr_cache + .get_addresses_by_authority_id(&self.remote_authority_public.clone().into()) }) .unwrap() } @@ -569,7 +566,7 @@ fn 
limit_number_of_addresses_added_to_cache_per_authority() { let addresses = (1..100).map(|i| tester.multiaddr_with_peer_id(i)).collect(); let kv_pairs = block_on(build_dht_event::( addresses, - tester.remote_authority_public.into(), + tester.remote_authority_public.clone().into(), &tester.remote_key_store, None, )); @@ -584,9 +581,9 @@ fn strict_accept_address_with_peer_signature() { let addr = tester.multiaddr_with_peer_id(1); let kv_pairs = block_on(build_dht_event( vec![addr.clone()], - tester.remote_authority_public.into(), + tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&TestSigner { keypair: &tester.remote_node_key }), + Some(&tester.remote_node_key), )); let cached_remote_addresses = tester.process_value_found(true, kv_pairs); @@ -601,12 +598,12 @@ fn strict_accept_address_with_peer_signature() { #[test] fn reject_address_with_rogue_peer_signature() { let mut tester = DhtValueFoundTester::new(); - let rogue_remote_node_key = Keypair::generate_ed25519(); + let rogue_remote_node_key = sc_network::Keypair::generate_ed25519(); let kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.into(), + tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&TestSigner { keypair: &rogue_remote_node_key }), + Some(&rogue_remote_node_key), )); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); @@ -622,9 +619,9 @@ fn reject_address_with_invalid_peer_signature() { let mut tester = DhtValueFoundTester::new(); let mut kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.into(), + tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&TestSigner { keypair: &tester.remote_node_key }), + Some(&tester.remote_node_key), )); // tamper with the signature let mut record = schema::SignedAuthorityRecord::decode(kv_pairs[0].1.as_slice()).unwrap(); @@ -644,7 +641,7 @@ fn 
reject_address_without_peer_signature() { let mut tester = DhtValueFoundTester::new(); let kv_pairs = block_on(build_dht_event::( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.into(), + tester.remote_authority_public.clone().into(), &tester.remote_key_store, None, )); @@ -662,7 +659,7 @@ fn do_not_cache_addresses_without_peer_id() { "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); let kv_pairs = block_on(build_dht_event::( vec![multiaddr_with_peer_id.clone(), multiaddr_without_peer_id], - tester.remote_authority_public.into(), + tester.remote_authority_public.clone().into(), &tester.remote_key_store, None, )); @@ -811,7 +808,7 @@ fn lookup_throttling() { None, ) .await; - DhtEvent::ValueFound(kv_pairs) + sc_network::DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -825,7 +822,7 @@ fn lookup_throttling() { // Make second one fail. let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let dht_event = DhtEvent::ValueNotFound(remote_hash); + let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); // Assert worker to trigger another lookup. 
diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 43493ada051f8..a2ccba486ae29 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -31,6 +31,6 @@ sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] -parking_lot = "0.12.1" +parking_lot = "0.12.0" sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index b69294bf6ccb0..bc328c40edb3c 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -736,7 +736,7 @@ mod tests { let api = client.runtime_api(); api.execute_block(&block_id, proposal.block).unwrap(); - let state = backend.state_at(genesis_hash).unwrap(); + let state = backend.state_at(block_id).unwrap(); let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); @@ -855,18 +855,10 @@ mod tests { .expect("header get error") .expect("there should be header"); - let extrinsics_num = 5; - let extrinsics = std::iter::once( - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 100, - nonce: 0, - } - .into_signed_tx(), - ) - .chain((0..extrinsics_num - 1).map(|v| Extrinsic::IncludeData(vec![v as u8; 10]))) - .collect::>(); + let extrinsics_num = 4; + let extrinsics = (0..extrinsics_num) + .map(|v| Extrinsic::IncludeData(vec![v as u8; 10])) + .collect::>(); let block_limit = genesis_header.encoded_size() + extrinsics @@ -930,9 +922,8 @@ mod tests { .unwrap(); // The block limit didn't changed, but we now include the proof in the estimation of the - // block size and thus, only the `Transfer` will fit into the block. 
It reads more data - // than we have reserved in the block limit. - assert_eq!(block.extrinsics().len(), 1); + // block size and thus, one less transaction should fit into the limit. + assert_eq!(block.extrinsics().len(), extrinsics_num - 2); } #[test] diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index a125d4c8d4f07..e219420959c9f 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -9,14 +9,14 @@ description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] -array-bytes = "4.1" -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3" futures-timer = "3.0.1" +hex = "0.4.2" log = "0.4" -parking_lot = "0.12.1" +parking_lot = "0.12.0" thiserror = "1.0" wasm-timer = "0.2.5" beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } @@ -27,7 +27,6 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../client/finality-grandpa" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml index 3ccf83c1f5106..46ee7640d710a 100644 --- a/client/beefy/rpc/Cargo.toml +++ b/client/beefy/rpc/Cargo.toml @@ -13,7 +13,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive futures = "0.3.21" jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } log = "0.4" -parking_lot = "0.12.1" +parking_lot = "0.12.0" serde = { version = "1.0.136", features = ["derive"] } 
thiserror = "1.0" beefy-gadget = { version = "4.0.0-dev", path = "../." } @@ -24,7 +24,7 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" sc-rpc = { version = "4.0.0-dev", features = [ "test-helpers", ], path = "../../rpc" } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index d29ed433c38db..91ff59324bd95 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -35,9 +35,7 @@ use jsonrpsee::{ }; use log::warn; -use beefy_gadget::communication::notification::{ - BeefyBestBlockStream, BeefyVersionedFinalityProofStream, -}; +use beefy_gadget::notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}; mod notification; @@ -84,7 +82,7 @@ impl From for JsonRpseeError { // Provides RPC methods for interacting with BEEFY. #[rpc(client, server)] pub trait BeefyApi { - /// Returns the block most recently finalized by BEEFY, alongside its justification. + /// Returns the block most recently finalized by BEEFY, alongside side its justification. #[subscription( name = "beefy_subscribeJustifications" => "beefy_justifications", unsubscribe = "beefy_unsubscribeJustifications", @@ -103,7 +101,7 @@ pub trait BeefyApi { /// Implements the BeefyApi RPC trait for interacting with BEEFY. pub struct Beefy { - finality_proof_stream: BeefyVersionedFinalityProofStream, + signed_commitment_stream: BeefySignedCommitmentStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } @@ -114,7 +112,7 @@ where { /// Creates a new Beefy Rpc handler instance. 
pub fn new( - finality_proof_stream: BeefyVersionedFinalityProofStream, + signed_commitment_stream: BeefySignedCommitmentStream, best_block_stream: BeefyBestBlockStream, executor: SubscriptionTaskExecutor, ) -> Result { @@ -128,21 +126,20 @@ where }); executor.spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed()); - Ok(Self { finality_proof_stream, beefy_best_block, executor }) + Ok(Self { signed_commitment_stream, beefy_best_block, executor }) } } #[async_trait] -impl BeefyApiServer - for Beefy +impl BeefyApiServer for Beefy where Block: BlockT, { fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult { let stream = self - .finality_proof_stream + .signed_commitment_stream .subscribe() - .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); + .map(|sc| notification::EncodedSignedCommitment::new::(sc)); let fut = async move { sink.pipe_from_stream(stream).await; @@ -167,31 +164,31 @@ mod tests { use super::*; use beefy_gadget::{ - communication::notification::BeefyVersionedFinalityProofSender, - justification::BeefyVersionedFinalityProof, + justification::BeefySignedCommitment, + notification::{BeefyBestBlockStream, BeefySignedCommitmentSender}, }; - use beefy_primitives::{known_payloads, Payload, SignedCommitment}; + use beefy_primitives::{known_payload_ids, Payload}; use codec::{Decode, Encode}; use jsonrpsee::{types::EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefyVersionedFinalityProofSender) { + fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefyVersionedFinalityProofSender) { - let (finality_proof_sender, finality_proof_stream) = - 
BeefyVersionedFinalityProofStream::::channel(); + ) -> (RpcModule>, BeefySignedCommitmentSender) { + let (commitment_sender, commitment_stream) = + BeefySignedCommitmentStream::::channel(); let handler = - Beefy::new(finality_proof_stream, best_block_stream, sc_rpc::testing::test_executor()) + Beefy::new(commitment_stream, best_block_stream, sc_rpc::testing::test_executor()) .expect("Setting up the BEEFY RPC handler works"); - (handler.into_rpc(), finality_proof_sender) + (handler.into_rpc(), commitment_sender) } #[tokio::test] @@ -265,22 +262,21 @@ mod tests { assert_eq!(response.result, expected); } - fn create_finality_proof() -> BeefyVersionedFinalityProof { - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); - BeefyVersionedFinalityProof::::V1(SignedCommitment { + fn create_commitment() -> BeefySignedCommitment { + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + BeefySignedCommitment:: { commitment: beefy_primitives::Commitment { payload, block_number: 5, validator_set_id: 0, }, signatures: vec![], - }) + } } #[tokio::test] async fn subscribe_and_listen_to_one_justification() { - let (rpc, finality_proof_sender) = setup_io_handler(); + let (rpc, commitment_sender) = setup_io_handler(); // Subscribe let mut sub = rpc @@ -288,16 +284,16 @@ mod tests { .await .unwrap(); - // Notify with finality_proof - let finality_proof = create_finality_proof(); - let r: Result<(), ()> = finality_proof_sender.notify(|| Ok(finality_proof.clone())); + // Notify with commitment + let commitment = create_commitment(); + let r: Result<(), ()> = commitment_sender.notify(|| Ok(commitment.clone())); r.unwrap(); // Inspect what we received let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); - let recv_finality_proof: BeefyVersionedFinalityProof = + let recv_commitment: BeefySignedCommitment = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(&recv_sub_id, sub.subscription_id()); - 
assert_eq!(recv_finality_proof, finality_proof); + assert_eq!(recv_commitment, commitment); } } diff --git a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs index a815425644d52..cdda667782dd5 100644 --- a/client/beefy/rpc/src/notification.rs +++ b/client/beefy/rpc/src/notification.rs @@ -21,19 +21,19 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::Block as BlockT; -/// An encoded finality proof proving that the given header has been finalized. +/// An encoded signed commitment proving that the given header has been finalized. /// The given bytes should be the SCALE-encoded representation of a -/// `beefy_primitives::VersionedFinalityProof`. +/// `beefy_primitives::SignedCommitment`. #[derive(Clone, Serialize, Deserialize)] -pub struct EncodedVersionedFinalityProof(sp_core::Bytes); +pub struct EncodedSignedCommitment(sp_core::Bytes); -impl EncodedVersionedFinalityProof { +impl EncodedSignedCommitment { pub fn new( - finality_proof: beefy_gadget::justification::BeefyVersionedFinalityProof, + signed_commitment: beefy_gadget::justification::BeefySignedCommitment, ) -> Self where Block: BlockT, { - EncodedVersionedFinalityProof(finality_proof.encode().into()) + EncodedSignedCommitment(signed_commitment.encode().into()) } } diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs deleted file mode 100644 index 91798d4ae0d33..0000000000000 --- a/client/beefy/src/communication/mod.rs +++ /dev/null @@ -1,113 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Communication streams for the BEEFY networking protocols. - -pub mod notification; -pub mod request_response; - -pub(crate) mod gossip; -pub(crate) mod peers; - -pub(crate) mod beefy_protocol_name { - use array_bytes::bytes2hex; - use sc_network::ProtocolName; - - /// BEEFY votes gossip protocol name suffix. - const GOSSIP_NAME: &str = "/beefy/1"; - /// BEEFY justifications protocol name suffix. - const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; - - /// Name of the votes gossip protocol used by BEEFY. - /// - /// Must be registered towards the networking in order for BEEFY voter to properly function. - pub fn gossip_protocol_name>( - genesis_hash: Hash, - fork_id: Option<&str>, - ) -> ProtocolName { - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, GOSSIP_NAME).into() - } else { - format!("/{}{}", bytes2hex("", genesis_hash), GOSSIP_NAME).into() - } - } - - /// Name of the BEEFY justifications request-response protocol. - pub fn justifications_protocol_name>( - genesis_hash: Hash, - fork_id: Option<&str>, - ) -> ProtocolName { - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, JUSTIFICATIONS_NAME).into() - } else { - format!("/{}{}", bytes2hex("", genesis_hash), JUSTIFICATIONS_NAME).into() - } - } -} - -/// Returns the configuration value to put in -/// [`sc_network::config::NetworkConfiguration::extra_sets`]. -/// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. 
-pub fn beefy_peers_set_config( - gossip_protocol_name: sc_network::ProtocolName, -) -> sc_network_common::config::NonDefaultSetConfig { - let mut cfg = - sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); - cfg.allow_non_reserved(25, 25); - cfg -} - -#[cfg(test)] -mod tests { - use super::*; - - use sp_core::H256; - - #[test] - fn beefy_protocols_names() { - use beefy_protocol_name::{gossip_protocol_name, justifications_protocol_name}; - // Create protocol name using random genesis hash. - let genesis_hash = H256::random(); - let genesis_hex = array_bytes::bytes2hex("", genesis_hash.as_ref()); - - let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); - let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); - assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); - - let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); - let justif_proto_name = justifications_protocol_name(&genesis_hash, None); - assert_eq!(justif_proto_name.to_string(), expected_justif_name); - - // Create protocol name using hardcoded genesis hash. Verify exact representation. 
- let genesis_hash = [ - 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, - 94, 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, - ]; - let genesis_hex = "32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93"; - - let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); - let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); - assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); - - let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); - let justif_proto_name = justifications_protocol_name(&genesis_hash, None); - assert_eq!(justif_proto_name.to_string(), expected_justif_name); - } -} diff --git a/client/beefy/src/communication/peers.rs b/client/beefy/src/communication/peers.rs deleted file mode 100644 index 0e20a0f4e0ff6..0000000000000 --- a/client/beefy/src/communication/peers.rs +++ /dev/null @@ -1,131 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Logic for keeping track of BEEFY peers. - -// TODO (issue #12296): replace this naive peer tracking with generic one that infers data -// from multiple network protocols. 
- -use sc_network::PeerId; -use sp_runtime::traits::{Block, NumberFor, Zero}; -use std::collections::{HashMap, VecDeque}; - -struct PeerData { - last_voted_on: NumberFor, -} - -impl Default for PeerData { - fn default() -> Self { - PeerData { last_voted_on: Zero::zero() } - } -} - -/// Keep a simple map of connected peers -/// and the most recent voting round they participated in. -pub struct KnownPeers { - live: HashMap>, -} - -impl KnownPeers { - pub fn new() -> Self { - Self { live: HashMap::new() } - } - - /// Add new connected `peer`. - pub fn add_new(&mut self, peer: PeerId) { - self.live.entry(peer).or_default(); - } - - /// Note vote round number for `peer`. - pub fn note_vote_for(&mut self, peer: PeerId, round: NumberFor) { - let data = self.live.entry(peer).or_default(); - data.last_voted_on = round.max(data.last_voted_on); - } - - /// Remove connected `peer`. - pub fn remove(&mut self, peer: &PeerId) { - self.live.remove(peer); - } - - /// Return _filtered and cloned_ list of peers that have voted on `block` or higher. - pub fn at_least_at_block(&self, block: NumberFor) -> VecDeque { - self.live - .iter() - .filter_map(|(k, v)| (v.last_voted_on >= block).then_some(k)) - .cloned() - .collect() - } - - /// Answer whether `peer` is part of `KnownPeers` set. - pub fn contains(&self, peer: &PeerId) -> bool { - self.live.contains_key(peer) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_track_known_peers_progress() { - let (alice, bob, charlie) = (PeerId::random(), PeerId::random(), PeerId::random()); - let mut peers = KnownPeers::::new(); - assert!(peers.live.is_empty()); - - // Alice and Bob new connected peers. - peers.add_new(alice); - peers.add_new(bob); - // 'Tracked' Bob seen voting for 5. - peers.note_vote_for(bob, 5); - // Previously unseen Charlie now seen voting for 10. 
- peers.note_vote_for(charlie, 10); - - assert_eq!(peers.live.len(), 3); - assert!(peers.contains(&alice)); - assert!(peers.contains(&bob)); - assert!(peers.contains(&charlie)); - - // Get peers at block >= 5 - let at_5 = peers.at_least_at_block(5); - // Should be Bob and Charlie - assert_eq!(at_5.len(), 2); - assert!(at_5.contains(&bob)); - assert!(at_5.contains(&charlie)); - - // 'Tracked' Alice seen voting for 10. - peers.note_vote_for(alice, 10); - - // Get peers at block >= 9 - let at_9 = peers.at_least_at_block(9); - // Should be Charlie and Alice - assert_eq!(at_9.len(), 2); - assert!(at_9.contains(&charlie)); - assert!(at_9.contains(&alice)); - - // Remove Alice - peers.remove(&alice); - assert_eq!(peers.live.len(), 2); - assert!(!peers.contains(&alice)); - - // Get peers at block >= 9 - let at_9 = peers.at_least_at_block(9); - // Now should be just Charlie - assert_eq!(at_9.len(), 1); - assert!(at_9.contains(&charlie)); - } -} diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs deleted file mode 100644 index 9f02b7162b54c..0000000000000 --- a/client/beefy/src/communication/request_response/incoming_requests_handler.rs +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -//! Helper for handling (i.e. answering) BEEFY justifications requests from a remote peer. - -use beefy_primitives::BEEFY_ENGINE_ID; -use codec::Decode; -use futures::{ - channel::{mpsc, oneshot}, - StreamExt, -}; -use log::{debug, trace}; -use sc_client_api::BlockBackend; -use sc_network::{config as netconfig, config::RequestResponseConfig, PeerId, ReputationChange}; -use sc_network_common::protocol::ProtocolName; -use sp_runtime::traits::Block; -use std::{marker::PhantomData, sync::Arc}; - -use crate::communication::request_response::{ - on_demand_justifications_protocol_config, Error, JustificationRequest, -}; - -/// A request coming in, including a sender for sending responses. -#[derive(Debug)] -pub(crate) struct IncomingRequest { - /// `PeerId` of sending peer. - pub peer: PeerId, - /// The sent request. - pub payload: JustificationRequest, - /// Sender for sending response back. - pub pending_response: oneshot::Sender, -} - -impl IncomingRequest { - /// Create new `IncomingRequest`. - pub fn new( - peer: PeerId, - payload: JustificationRequest, - pending_response: oneshot::Sender, - ) -> Self { - Self { peer, payload, pending_response } - } - - /// Try building from raw network request. - /// - /// This function will fail if the request cannot be decoded and will apply passed in - /// reputation changes in that case. - /// - /// Params: - /// - The raw request to decode - /// - Reputation changes to apply for the peer in case decoding fails. 
- pub fn try_from_raw( - raw: netconfig::IncomingRequest, - reputation_changes: Vec, - ) -> Result { - let netconfig::IncomingRequest { payload, peer, pending_response } = raw; - let payload = match JustificationRequest::decode(&mut payload.as_ref()) { - Ok(payload) => payload, - Err(err) => { - let response = netconfig::OutgoingResponse { - result: Err(()), - reputation_changes, - sent_feedback: None, - }; - if let Err(_) = pending_response.send(response) { - return Err(Error::DecodingErrorNoReputationChange(peer, err)) - } - return Err(Error::DecodingError(peer, err)) - }, - }; - Ok(Self::new(peer, payload, pending_response)) - } -} - -/// Receiver for incoming BEEFY justifications requests. -/// -/// Takes care of decoding and handling of invalid encoded requests. -pub(crate) struct IncomingRequestReceiver { - raw: mpsc::Receiver, -} - -impl IncomingRequestReceiver { - pub fn new(inner: mpsc::Receiver) -> Self { - Self { raw: inner } - } - - /// Try to receive the next incoming request. - /// - /// Any received request will be decoded, on decoding errors the provided reputation changes - /// will be applied and an error will be reported. - pub async fn recv(&mut self, reputation_changes: F) -> Result, Error> - where - B: Block, - F: FnOnce() -> Vec, - { - let req = match self.raw.next().await { - None => return Err(Error::RequestChannelExhausted), - Some(raw) => IncomingRequest::::try_from_raw(raw, reputation_changes())?, - }; - Ok(req) - } -} - -/// Handler for incoming BEEFY justifications requests from a remote peer. -pub struct BeefyJustifsRequestHandler { - pub(crate) request_receiver: IncomingRequestReceiver, - pub(crate) justif_protocol_name: ProtocolName, - pub(crate) client: Arc, - pub(crate) _block: PhantomData, -} - -impl BeefyJustifsRequestHandler -where - B: Block, - Client: BlockBackend + Send + Sync, -{ - /// Create a new [`BeefyJustifsRequestHandler`]. 
- pub fn new>( - genesis_hash: Hash, - fork_id: Option<&str>, - client: Arc, - ) -> (Self, RequestResponseConfig) { - let (request_receiver, config) = - on_demand_justifications_protocol_config(genesis_hash, fork_id); - let justif_protocol_name = config.name.clone(); - - (Self { request_receiver, justif_protocol_name, client, _block: PhantomData }, config) - } - - /// Network request-response protocol name used by this handler. - pub fn protocol_name(&self) -> ProtocolName { - self.justif_protocol_name.clone() - } - - // Sends back justification response if justification found in client backend. - fn handle_request(&self, request: IncomingRequest) -> Result<(), Error> { - // TODO (issue #12293): validate `request` and change peer reputation for invalid requests. - - let maybe_encoded_proof = if let Some(hash) = - self.client.block_hash(request.payload.begin).map_err(Error::Client)? - { - self.client - .justifications(hash) - .map_err(Error::Client)? - .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) - // No BEEFY justification present. - .ok_or(()) - } else { - Err(()) - }; - - request - .pending_response - .send(netconfig::OutgoingResponse { - result: maybe_encoded_proof, - reputation_changes: Vec::new(), - sent_feedback: None, - }) - .map_err(|_| Error::SendResponse) - } - - /// Run [`BeefyJustifsRequestHandler`]. - pub async fn run(mut self) { - trace!(target: "beefy::sync", "🥩 Running BeefyJustifsRequestHandler"); - - while let Ok(request) = self.request_receiver.recv(|| vec![]).await { - let peer = request.peer; - match self.handle_request(request) { - Ok(()) => { - debug!( - target: "beefy::sync", - "🥩 Handled BEEFY justification request from {:?}.", peer - ) - }, - Err(e) => { - // TODO (issue #12293): apply reputation changes here based on error type. 
- debug!( - target: "beefy::sync", - "🥩 Failed to handle BEEFY justification request from {:?}: {}", peer, e, - ) - }, - } - } - } -} diff --git a/client/beefy/src/communication/request_response/mod.rs b/client/beefy/src/communication/request_response/mod.rs deleted file mode 100644 index c83bb9d57e91b..0000000000000 --- a/client/beefy/src/communication/request_response/mod.rs +++ /dev/null @@ -1,101 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Request/response protocol for syncing BEEFY justifications. - -mod incoming_requests_handler; -pub(crate) mod outgoing_requests_engine; - -pub use incoming_requests_handler::BeefyJustifsRequestHandler; - -use futures::channel::mpsc; -use std::time::Duration; - -use codec::{Decode, Encode, Error as CodecError}; -use sc_network::{config::RequestResponseConfig, PeerId}; -use sp_runtime::traits::{Block, NumberFor}; - -use crate::communication::beefy_protocol_name::justifications_protocol_name; -use incoming_requests_handler::IncomingRequestReceiver; - -// 10 seems reasonable, considering justifs are explicitly requested only -// for mandatory blocks, by nodes that are syncing/catching-up. 
-const JUSTIF_CHANNEL_SIZE: usize = 10; - -const MAX_RESPONSE_SIZE: u64 = 1024 * 1024; -const JUSTIF_REQUEST_TIMEOUT: Duration = Duration::from_secs(3); - -/// Get the configuration for the BEEFY justifications Request/response protocol. -/// -/// Returns a receiver for messages received on this protocol and the requested -/// `ProtocolConfig`. -/// -/// Consider using [`BeefyJustifsRequestHandler`] instead of this low-level function. -pub(crate) fn on_demand_justifications_protocol_config>( - genesis_hash: Hash, - fork_id: Option<&str>, -) -> (IncomingRequestReceiver, RequestResponseConfig) { - let name = justifications_protocol_name(genesis_hash, fork_id); - let fallback_names = vec![]; - let (tx, rx) = mpsc::channel(JUSTIF_CHANNEL_SIZE); - let rx = IncomingRequestReceiver::new(rx); - let cfg = RequestResponseConfig { - name, - fallback_names, - max_request_size: 32, - max_response_size: MAX_RESPONSE_SIZE, - // We are connected to all validators: - request_timeout: JUSTIF_REQUEST_TIMEOUT, - inbound_queue: Some(tx), - }; - (rx, cfg) -} - -/// BEEFY justification request. -#[derive(Debug, Clone, Encode, Decode)] -pub struct JustificationRequest { - /// Start collecting proofs from this block. - pub begin: NumberFor, -} - -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error(transparent)] - Client(#[from] sp_blockchain::Error), - - #[error(transparent)] - RuntimeApi(#[from] sp_api::ApiError), - - /// Decoding failed, we were able to change the peer's reputation accordingly. - #[error("Decoding request failed for peer {0}.")] - DecodingError(PeerId, #[source] CodecError), - - /// Decoding failed, but sending reputation change failed. - #[error("Decoding request failed for peer {0}, and changing reputation failed.")] - DecodingErrorNoReputationChange(PeerId, #[source] CodecError), - - /// Incoming request stream exhausted. Should only happen on shutdown. 
- #[error("Incoming request channel got closed.")] - RequestChannelExhausted, - - #[error("Failed to send response.")] - SendResponse, - - #[error("Received invalid response.")] - InvalidResponse, -} diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs deleted file mode 100644 index c4d3c926190e6..0000000000000 --- a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ /dev/null @@ -1,241 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Generating request logic for request/response protocol for syncing BEEFY justifications. 
- -use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; -use codec::Encode; -use futures::channel::{oneshot, oneshot::Canceled}; -use log::{debug, error, warn}; -use parking_lot::Mutex; -use sc_network::{PeerId, ProtocolName}; -use sc_network_common::{ - request_responses::{IfDisconnected, RequestFailure}, - service::NetworkRequest, -}; -use sp_api::ProvideRuntimeApi; -use sp_runtime::{ - generic::BlockId, - traits::{Block, NumberFor}, -}; -use std::{collections::VecDeque, result::Result, sync::Arc}; - -use crate::{ - communication::request_response::{Error, JustificationRequest}, - justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, - KnownPeers, -}; - -/// Response type received from network. -type Response = Result, RequestFailure>; -/// Used to receive a response from the network. -type ResponseReceiver = oneshot::Receiver; - -enum State { - Idle, - AwaitingResponse(PeerId, NumberFor, ResponseReceiver), -} - -pub struct OnDemandJustificationsEngine { - network: Arc, - runtime: Arc, - protocol_name: ProtocolName, - - live_peers: Arc>>, - peers_cache: VecDeque, - - state: State, -} - -impl OnDemandJustificationsEngine -where - B: Block, - R: ProvideRuntimeApi, - R::Api: BeefyApi, -{ - pub fn new( - network: Arc, - runtime: Arc, - protocol_name: ProtocolName, - live_peers: Arc>>, - ) -> Self { - Self { - network, - runtime, - protocol_name, - live_peers, - peers_cache: VecDeque::new(), - state: State::Idle, - } - } - - fn reset_peers_cache_for_block(&mut self, block: NumberFor) { - // TODO (issue #12296): replace peer selection with generic one that involves all protocols. - self.peers_cache = self.live_peers.lock().at_least_at_block(block); - } - - fn try_next_peer(&mut self) -> Option { - // TODO (issue #12296): replace peer selection with generic one that involves all protocols. 
- let live = self.live_peers.lock(); - while let Some(peer) = self.peers_cache.pop_front() { - if live.contains(&peer) { - return Some(peer) - } - } - None - } - - fn request_from_peer(&mut self, peer: PeerId, block: NumberFor) { - debug!(target: "beefy::sync", "🥩 requesting justif #{:?} from peer {:?}", block, peer); - - let payload = JustificationRequest:: { begin: block }.encode(); - - let (tx, rx) = oneshot::channel(); - - self.network.start_request( - peer, - self.protocol_name.clone(), - payload, - tx, - IfDisconnected::ImmediateError, - ); - - self.state = State::AwaitingResponse(peer, block, rx); - } - - /// If no other request is in progress, start new justification request for `block`. - pub fn request(&mut self, block: NumberFor) { - // ignore new requests while there's already one pending - if matches!(self.state, State::AwaitingResponse(_, _, _)) { - return - } - self.reset_peers_cache_for_block(block); - - // Start the requests engine - each unsuccessful received response will automatically - // trigger a new request to the next peer in the `peers_cache` until there are none left. - if let Some(peer) = self.try_next_peer() { - self.request_from_peer(peer, block); - } else { - debug!(target: "beefy::sync", "🥩 no good peers to request justif #{:?} from", block); - } - } - - /// Cancel any pending request for block numbers smaller or equal to `block`. 
- pub fn cancel_requests_older_than(&mut self, block: NumberFor) { - match &self.state { - State::AwaitingResponse(_, number, _) if *number <= block => { - debug!( - target: "beefy::sync", - "🥩 cancel pending request for justification #{:?}", - number - ); - self.state = State::Idle; - }, - _ => (), - } - } - - fn process_response( - &mut self, - peer: PeerId, - block: NumberFor, - validator_set: &ValidatorSet, - response: Result, - ) -> Result, Error> { - response - .map_err(|e| { - debug!( - target: "beefy::sync", - "🥩 for on demand justification #{:?}, peer {:?} hung up: {:?}", - block, peer, e - ); - Error::InvalidResponse - })? - .map_err(|e| { - debug!( - target: "beefy::sync", - "🥩 for on demand justification #{:?}, peer {:?} error: {:?}", - block, peer, e - ); - Error::InvalidResponse - }) - .and_then(|encoded| { - decode_and_verify_finality_proof::(&encoded[..], block, &validator_set).map_err( - |e| { - debug!( - target: "beefy::sync", - "🥩 for on demand justification #{:?}, peer {:?} responded with invalid proof: {:?}", - block, peer, e - ); - Error::InvalidResponse - }, - ) - }) - } - - pub async fn next(&mut self) -> Option> { - let (peer, block, resp) = match &mut self.state { - State::Idle => { - futures::pending!(); - // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. - return None - }, - State::AwaitingResponse(peer, block, receiver) => { - let resp = receiver.await; - (*peer, *block, resp) - }, - }; - // We received the awaited response. Our 'receiver' will never generate any other response, - // meaning we're done with current state. Move the engine to `State::Idle`. - self.state = State::Idle; - - let block_id = BlockId::number(block); - let validator_set = self - .runtime - .runtime_api() - .validator_set(&block_id) - .map_err(|e| { - error!(target: "beefy::sync", "🥩 Runtime API error {:?} in on-demand justif engine.", e); - e - }) - .ok()? 
- .or_else(|| { - error!(target: "beefy::sync", "🥩 BEEFY pallet not available for block {:?}.", block); - None - })?; - - self.process_response(peer, block, &validator_set, resp) - .map_err(|_| { - // No valid justification received, try next peer in our set. - if let Some(peer) = self.try_next_peer() { - self.request_from_peer(peer, block); - } else { - warn!(target: "beefy::sync", "🥩 ran out of peers to request justif #{:?} from", block); - } - }) - .map(|proof| { - debug!( - target: "beefy::sync", - "🥩 received valid on-demand justif #{:?} from {:?}", - block, peer - ); - proof - }) - .ok() - } -} diff --git a/client/beefy/src/communication/gossip.rs b/client/beefy/src/gossip.rs similarity index 93% rename from client/beefy/src/communication/gossip.rs rename to client/beefy/src/gossip.rs index 520548b943f96..02d5efe9e0e58 100644 --- a/client/beefy/src/communication/gossip.rs +++ b/client/beefy/src/gossip.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{collections::BTreeMap, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, time::Duration}; use sc_network::PeerId; use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; @@ -28,12 +28,13 @@ use log::{debug, trace}; use parking_lot::{Mutex, RwLock}; use wasm_timer::Instant; -use crate::{communication::peers::KnownPeers, keystore::BeefyKeystore}; use beefy_primitives::{ crypto::{Public, Signature}, VoteMessage, }; +use crate::keystore::BeefyKeystore; + // Timeout for rebroadcasting messages. 
const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); @@ -102,19 +103,17 @@ where topic: B::Hash, known_votes: RwLock>, next_rebroadcast: Mutex, - known_peers: Arc>>, } impl GossipValidator where B: Block, { - pub fn new(known_peers: Arc>>) -> GossipValidator { + pub fn new() -> GossipValidator { GossipValidator { topic: topic::(), known_votes: RwLock::new(KnownVotes::new()), next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), - known_peers, } } @@ -166,7 +165,6 @@ where if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { self.known_votes.write().add_known(&round, msg_hash); - self.known_peers.lock().note_vote_for(*sender, round); return ValidationResult::ProcessAndKeep(self.topic) } else { // TODO: report peer @@ -237,7 +235,8 @@ mod tests { use crate::keystore::{tests::Keyring, BeefyKeystore}; use beefy_primitives::{ - crypto::Signature, known_payloads, Commitment, MmrRootHash, Payload, VoteMessage, KEY_TYPE, + crypto::Signature, known_payload_ids, Commitment, MmrRootHash, Payload, VoteMessage, + KEY_TYPE, }; use super::*; @@ -272,7 +271,7 @@ mod tests { #[test] fn note_and_drop_round_works() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let gv = GossipValidator::::new(); gv.note_round(1u64); @@ -299,7 +298,7 @@ mod tests { #[test] fn note_same_round_twice() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let gv = GossipValidator::::new(); gv.note_round(3u64); gv.note_round(7u64); @@ -347,10 +346,7 @@ mod tests { } fn dummy_vote(block_number: u64) -> VoteMessage { - let payload = Payload::from_single_entry( - known_payloads::MMR_ROOT_ID, - MmrRootHash::default().encode(), - ); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, MmrRootHash::default().encode()); let commitment = Commitment { payload, block_number, validator_set_id: 0 }; let signature = sign_commitment(&Keyring::Alice, &commitment); @@ -359,7 +355,7 @@ mod tests { #[test] fn 
should_avoid_verifying_signatures_twice() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let gv = GossipValidator::::new(); let sender = sc_network::PeerId::random(); let mut context = TestContext; @@ -395,7 +391,7 @@ mod tests { #[test] fn messages_allowed_and_expired() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let gv = GossipValidator::::new(); let sender = sc_network::PeerId::random(); let topic = Default::default(); let intent = MessageIntent::Broadcast; @@ -438,7 +434,7 @@ mod tests { #[test] fn messages_rebroadcast() { - let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); + let gv = GossipValidator::::new(); let sender = sc_network::PeerId::random(); let topic = Default::default(); diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index 0ed50d0ec8c98..7caeb49db5e50 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -16,12 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use beefy_primitives::{BeefyApi, BEEFY_ENGINE_ID}; -use log::debug; +use beefy_primitives::{crypto::Signature, BeefyApi, VersionedFinalityProof, BEEFY_ENGINE_ID}; +use codec::Encode; +use log::error; use std::{collections::HashMap, sync::Arc}; use sp_api::{ProvideRuntimeApi, TransactionFor}; -use sp_blockchain::well_known_cache_keys; +use sp_blockchain::{well_known_cache_keys, HeaderBackend}; use sp_consensus::Error as ConsensusError; use sp_runtime::{ generic::BlockId, @@ -33,8 +34,7 @@ use sc_client_api::backend::Backend; use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; use crate::{ - communication::notification::BeefyVersionedFinalityProofSender, - justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + justification::decode_and_verify_commitment, notification::BeefySignedCommitmentSender, }; /// A block-import handler for BEEFY. @@ -47,7 +47,7 @@ pub struct BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefyVersionedFinalityProofSender, + justification_sender: BeefySignedCommitmentSender, } impl Clone for BeefyBlockImport { @@ -67,7 +67,7 @@ impl BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefyVersionedFinalityProofSender, + justification_sender: BeefySignedCommitmentSender, ) -> BeefyBlockImport { BeefyBlockImport { backend, runtime, inner, justification_sender } } @@ -78,14 +78,14 @@ where Block: BlockT, BE: Backend, Runtime: ProvideRuntimeApi, - Runtime::Api: BeefyApi + Send, + Runtime::Api: BeefyApi + Send + Sync, { fn decode_and_verify( &self, encoded: &EncodedJustification, number: NumberFor, hash: ::Hash, - ) -> Result, ConsensusError> { + ) -> Result, Signature>, ConsensusError> { let block_id = BlockId::hash(hash); let validator_set = self .runtime @@ -94,7 +94,35 @@ where .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
.ok_or_else(|| ConsensusError::ClientImport("Unknown validator set".to_string()))?; - decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) + decode_and_verify_commitment::(&encoded[..], number, &validator_set) + } + + /// Import BEEFY justification: Send it to worker for processing and also append it to backend. + /// + /// This function assumes: + /// - `justification` is verified and valid, + /// - the block referred by `justification` has been imported _and_ finalized. + fn import_beefy_justification_unchecked( + &self, + number: NumberFor, + justification: VersionedFinalityProof, Signature>, + ) { + // Append the justification to the block in the backend. + if let Err(e) = self.backend.append_justification( + BlockId::Number(number), + (BEEFY_ENGINE_ID, justification.encode()), + ) { + error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, justification); + } + // Send the justification to the BEEFY voter for processing. + match justification { + // TODO #11838: Should not unpack, these channels should also use + // `VersionedFinalityProof`. + VersionedFinalityProof::V1(signed_commitment) => self + .justification_sender + .notify(|| Ok::<_, ()>(signed_commitment)) + .expect("forwards closure result; the closure always returns Ok; qed."), + }; } } @@ -123,31 +151,42 @@ where let hash = block.post_hash(); let number = *block.header.number(); - let beefy_encoded = block.justifications.as_mut().and_then(|just| { - let encoded = just.get(BEEFY_ENGINE_ID).cloned(); - // Remove BEEFY justification from the list before giving to `inner`; we send it to the - // voter (beefy-gadget) and it will append it to the backend after block is finalized. 
- just.remove(BEEFY_ENGINE_ID); - encoded - }); + let beefy_proof = block + .justifications + .as_mut() + .and_then(|just| { + let decoded = just + .get(BEEFY_ENGINE_ID) + .map(|encoded| self.decode_and_verify(encoded, number, hash)); + // Remove BEEFY justification from the list before giving to `inner`; + // we will append it to backend ourselves at the end if all goes well. + just.remove(BEEFY_ENGINE_ID); + decoded + }) + .transpose() + .unwrap_or(None); // Run inner block import. let inner_import_result = self.inner.import_block(block, new_cache).await?; - match (beefy_encoded, &inner_import_result) { - (Some(encoded), ImportResult::Imported(_)) => { - if let Ok(proof) = self.decode_and_verify(&encoded, number, hash) { + match (beefy_proof, &inner_import_result) { + (Some(proof), ImportResult::Imported(_)) => { + let status = self.backend.blockchain().info(); + if number <= status.finalized_number && + Some(hash) == + self.backend + .blockchain() + .hash(number) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + { // The proof is valid and the block is imported and final, we can import. - debug!(target: "beefy", "🥩 import justif {:?} for block number {:?}.", proof, number); - // Send the justification to the BEEFY voter for processing. 
- self.justification_sender - .notify(|| Ok::<_, ()>(proof)) - .expect("forwards closure result; the closure always returns Ok; qed."); + self.import_beefy_justification_unchecked(number, proof); } else { - debug!( + error!( target: "beefy", - "🥩 error decoding justification: {:?} for imported block {:?}", - encoded, number, + "🥩 Cannot import justification: {:?} for, not yet final, block number {:?}", + proof, + number, ); } }, diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs index 7243c692727f0..2a5191daec4b5 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -25,17 +25,17 @@ use codec::{Decode, Encode}; use sp_consensus::Error as ConsensusError; use sp_runtime::traits::{Block as BlockT, NumberFor}; -/// A finality proof with matching BEEFY authorities' signatures. -pub type BeefyVersionedFinalityProof = - beefy_primitives::VersionedFinalityProof, Signature>; +/// A commitment with matching BEEFY authorities' signatures. +pub type BeefySignedCommitment = + beefy_primitives::SignedCommitment, beefy_primitives::crypto::Signature>; -/// Decode and verify a Beefy FinalityProof. -pub(crate) fn decode_and_verify_finality_proof( +/// Decode and verify a Beefy SignedCommitment. 
+pub(crate) fn decode_and_verify_commitment( encoded: &[u8], target_number: NumberFor, validator_set: &ValidatorSet, -) -> Result, ConsensusError> { - let proof = >::decode(&mut &*encoded) +) -> Result, Signature>, ConsensusError> { + let proof = , Signature>>::decode(&mut &*encoded) .map_err(|_| ConsensusError::InvalidJustification)?; verify_with_validator_set::(target_number, validator_set, &proof).map(|_| proof) } @@ -44,7 +44,7 @@ pub(crate) fn decode_and_verify_finality_proof( fn verify_with_validator_set( target_number: NumberFor, validator_set: &ValidatorSet, - proof: &BeefyVersionedFinalityProof, + proof: &VersionedFinalityProof, Signature>, ) -> Result<(), ConsensusError> { match proof { VersionedFinalityProof::V1(signed_commitment) => { @@ -80,27 +80,25 @@ fn verify_with_validator_set( #[cfg(test)] pub(crate) mod tests { - use beefy_primitives::{ - known_payloads, Commitment, Payload, SignedCommitment, VersionedFinalityProof, - }; + use beefy_primitives::{known_payload_ids, Commitment, Payload, SignedCommitment}; use substrate_test_runtime_client::runtime::Block; use super::*; use crate::{keystore::tests::Keyring, tests::make_beefy_ids}; - pub(crate) fn new_finality_proof( + pub(crate) fn new_signed_commitment( block_num: NumberFor, validator_set: &ValidatorSet, keys: &[Keyring], - ) -> BeefyVersionedFinalityProof { + ) -> BeefySignedCommitment { let commitment = Commitment { - payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; let message = commitment.encode(); let signatures = keys.iter().map(|key| Some(key.sign(&message))).collect(); - VersionedFinalityProof::V1(SignedCommitment { commitment, signatures }) + SignedCommitment { commitment, signatures } } #[test] @@ -110,7 +108,7 @@ pub(crate) mod tests { // build valid justification let block_num = 42; - let proof = new_finality_proof(block_num, 
&validator_set, keys); + let proof = new_signed_commitment(block_num, &validator_set, keys); let good_proof = proof.clone().into(); // should verify successfully @@ -134,10 +132,7 @@ pub(crate) mod tests { // wrong signatures length -> should fail verification let mut bad_proof = proof.clone(); // change length of signatures - let bad_signed_commitment = match bad_proof { - VersionedFinalityProof::V1(ref mut sc) => sc, - }; - bad_signed_commitment.signatures.pop().flatten().unwrap(); + bad_proof.signatures.pop().flatten().unwrap(); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -145,11 +140,8 @@ pub(crate) mod tests { // not enough signatures -> should fail verification let mut bad_proof = proof.clone(); - let bad_signed_commitment = match bad_proof { - VersionedFinalityProof::V1(ref mut sc) => sc, - }; // remove a signature (but same length) - *bad_signed_commitment.signatures.first_mut().unwrap() = None; + *bad_proof.signatures.first_mut().unwrap() = None; match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -157,12 +149,9 @@ pub(crate) mod tests { // not enough _correct_ signatures -> should fail verification let mut bad_proof = proof.clone(); - let bad_signed_commitment = match bad_proof { - VersionedFinalityProof::V1(ref mut sc) => sc, - }; // change a signature to a different key - *bad_signed_commitment.signatures.first_mut().unwrap() = - Some(Keyring::Dave.sign(&bad_signed_commitment.commitment.encode())); + *bad_proof.signatures.first_mut().unwrap() = + Some(Keyring::Dave.sign(&proof.commitment.encode())); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => 
(), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -170,19 +159,19 @@ pub(crate) mod tests { } #[test] - fn should_decode_and_verify_finality_proof() { + fn should_decode_and_verify_commitment() { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let block_num = 1; // build valid justification - let proof = new_finality_proof(block_num, &validator_set, keys); - let versioned_proof: BeefyVersionedFinalityProof = proof.into(); + let proof = new_signed_commitment(block_num, &validator_set, keys); + let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); let encoded = versioned_proof.encode(); // should successfully decode and verify let verified = - decode_and_verify_finality_proof::(&encoded, block_num, &validator_set).unwrap(); + decode_and_verify_commitment::(&encoded, block_num, &validator_set).unwrap(); assert_eq!(verified, versioned_proof); } } diff --git a/client/beefy/src/keystore.rs b/client/beefy/src/keystore.rs index 886c00fc5d817..b0259a42075ea 100644 --- a/client/beefy/src/keystore.rs +++ b/client/beefy/src/keystore.rs @@ -19,13 +19,12 @@ use sp_application_crypto::RuntimeAppPublic; use sp_core::keccak_256; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; -use sp_runtime::traits::Keccak256; use log::warn; use beefy_primitives::{ crypto::{Public, Signature}, - BeefyVerify, KEY_TYPE, + KEY_TYPE, }; use crate::error; @@ -99,7 +98,11 @@ impl BeefyKeystore { /// /// Return `true` if the signature is authentic, `false` otherwise. 
pub fn verify(public: &Public, sig: &Signature, message: &[u8]) -> bool { - BeefyVerify::::verify(sig, message, public) + let msg = keccak_256(message); + let sig = sig.as_ref(); + let public = public.as_ref(); + + sp_core::ecdsa::Pair::verify_prehashed(sig, &msg, public) } } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 441f6e4248117..81c72dec8cd08 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -16,52 +16,77 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use beefy_primitives::{BeefyApi, MmrRootHash, PayloadProvider}; -use parking_lot::Mutex; +use beefy_primitives::{BeefyApi, MmrRootHash}; use prometheus::Registry; -use sc_client_api::{Backend, BlockBackend, BlockchainEvents, Finalizer}; +use sc_client_api::{Backend, BlockchainEvents, Finalizer}; use sc_consensus::BlockImport; -use sc_network::ProtocolName; -use sc_network_common::service::NetworkRequest; use sc_network_gossip::Network as GossipNetwork; -use sp_api::{NumberFor, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::Block; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; mod error; +mod gossip; mod keystore; mod metrics; mod round; mod worker; -pub mod communication; pub mod import; pub mod justification; +pub mod notification; #[cfg(test)] mod tests; use crate::{ - communication::{ - notification::{ - BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, - BeefyVersionedFinalityProofStream, - }, - peers::KnownPeers, - request_response::{ - outgoing_requests_engine::OnDemandJustificationsEngine, BeefyJustifsRequestHandler, - }, - }, import::BeefyBlockImport, + notification::{ + BeefyBestBlockSender, BeefyBestBlockStream, BeefySignedCommitmentSender, + 
BeefySignedCommitmentStream, + }, }; -pub use communication::beefy_protocol_name::{ - gossip_protocol_name, justifications_protocol_name as justifs_protocol_name, -}; +pub use beefy_protocol_name::standard_name as protocol_standard_name; + +pub(crate) mod beefy_protocol_name { + use sc_chain_spec::ChainSpec; + + const NAME: &str = "/beefy/1"; + /// Old names for the notifications protocol, used for backward compatibility. + pub(crate) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; + + /// Name of the notifications protocol used by BEEFY. + /// + /// Must be registered towards the networking in order for BEEFY to properly function. + pub fn standard_name>( + genesis_hash: &Hash, + chain_spec: &Box, + ) -> std::borrow::Cow<'static, str> { + let chain_prefix = match chain_spec.fork_id() { + Some(fork_id) => format!("/{}/{}", hex::encode(genesis_hash), fork_id), + None => format!("/{}", hex::encode(genesis_hash)), + }; + format!("{}{}", chain_prefix, NAME).into() + } +} + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +/// For standard protocol name see [`beefy_protocol_name::standard_name`]. +pub fn beefy_peers_set_config( + protocol_name: std::borrow::Cow<'static, str>, +) -> sc_network::config::NonDefaultSetConfig { + let mut cfg = sc_network::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); + + cfg.allow_non_reserved(25, 25); + cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); + cfg +} /// A convenience BEEFY client trait that defines all the type bounds a BEEFY client /// has to satisfy. Ideally that should actually be a trait alias. Unfortunately as @@ -96,11 +121,11 @@ where pub struct BeefyVoterLinks { // BlockImport -> Voter links /// Stream of BEEFY signed commitments from block import to voter. 
- pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, + pub from_block_import_justif_stream: BeefySignedCommitmentStream, // Voter -> RPC links /// Sends BEEFY signed commitments from voter to RPC. - pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, + pub to_rpc_justif_sender: BeefySignedCommitmentSender, /// Sends BEEFY best block hashes from voter to RPC. pub to_rpc_best_block_sender: BeefyBestBlockSender, } @@ -109,7 +134,7 @@ pub struct BeefyVoterLinks { #[derive(Clone)] pub struct BeefyRPCLinks { /// Stream of signed commitments coming from the voter. - pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, + pub from_voter_justif_stream: BeefySignedCommitmentStream, /// Stream of BEEFY best block hashes coming from the voter. pub from_voter_best_beefy_stream: BeefyBestBlockStream, } @@ -131,13 +156,13 @@ where { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + notification::BeefySignedCommitmentStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = - BeefyBestBlockStream::::channel(); + notification::BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + notification::BeefySignedCommitmentStream::::channel(); // BlockImport let import = @@ -152,90 +177,69 @@ where (import, voter_links, rpc_links) } -/// BEEFY gadget network parameters. -pub struct BeefyNetworkParams { - /// Network implementing gossip, requests and sync-oracle. - pub network: Arc, - /// Chain specific BEEFY gossip protocol name. See - /// [`communication::beefy_protocol_name::gossip_protocol_name`]. - pub gossip_protocol_name: ProtocolName, - /// Chain specific BEEFY on-demand justifications protocol name. See - /// [`communication::beefy_protocol_name::justifications_protocol_name`]. 
- pub justifications_protocol_name: ProtocolName, - - pub _phantom: PhantomData, -} - /// BEEFY gadget initialization parameters. -pub struct BeefyParams { +pub struct BeefyParams +where + B: Block, + BE: Backend, + C: Client, + R: ProvideRuntimeApi, + R::Api: BeefyApi + MmrApi, + N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, +{ /// BEEFY client pub client: Arc, /// Client Backend pub backend: Arc, - /// BEEFY Payload provider - pub payload_provider: P, /// Runtime Api Provider pub runtime: Arc, /// Local key store pub key_store: Option, - /// BEEFY voter network params - pub network_params: BeefyNetworkParams, + /// Gossip network + pub network: N, /// Minimal delta between blocks, BEEFY should vote for pub min_block_delta: u32, /// Prometheus metric registry pub prometheus_registry: Option, + /// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`]. + pub protocol_name: std::borrow::Cow<'static, str>, /// Links between the block importer, the background voter and the RPC layer. pub links: BeefyVoterLinks, - /// Handler for incoming BEEFY justifications requests from a remote peer. - pub on_demand_justifications_handler: BeefyJustifsRequestHandler, } /// Start the BEEFY gadget. /// /// This is a thin shim around running and awaiting a BEEFY worker. 
-pub async fn start_beefy_gadget(beefy_params: BeefyParams) +pub async fn start_beefy_gadget(beefy_params: BeefyParams) where B: Block, BE: Backend, - C: Client + BlockBackend, - P: PayloadProvider, + C: Client, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi>, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, + R::Api: BeefyApi + MmrApi, + N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, { let BeefyParams { client, backend, - payload_provider, runtime, key_store, - network_params, + network, min_block_delta, prometheus_registry, + protocol_name, links, - on_demand_justifications_handler, } = beefy_params; - let BeefyNetworkParams { network, gossip_protocol_name, justifications_protocol_name, .. } = - network_params; - - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let gossip_validator = - Arc::new(communication::gossip::GossipValidator::new(known_peers.clone())); + let sync_oracle = network.clone(); + let gossip_validator = Arc::new(gossip::GossipValidator::new()); let gossip_engine = sc_network_gossip::GossipEngine::new( - network.clone(), - gossip_protocol_name, + network, + protocol_name, gossip_validator.clone(), None, ); - let on_demand_justifications = OnDemandJustificationsEngine::new( - network.clone(), - runtime.clone(), - justifications_protocol_name, - known_peers.clone(), - ); - let metrics = prometheus_registry.as_ref().map(metrics::Metrics::register).and_then( |result| match result { @@ -253,20 +257,17 @@ where let worker_params = worker::WorkerParams { client, backend, - payload_provider, runtime, - network, + sync_oracle, key_store: key_store.into(), - known_peers, gossip_engine, gossip_validator, - on_demand_justifications, links, metrics, min_block_delta, }; - let worker = worker::BeefyWorker::<_, _, _, _, _, _>::new(worker_params); + let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); - futures::future::join(worker.run(), on_demand_justifications_handler.run()).await; + 
worker.run().await } diff --git a/client/beefy/src/communication/notification.rs b/client/beefy/src/notification.rs similarity index 71% rename from client/beefy/src/communication/notification.rs rename to client/beefy/src/notification.rs index c673115e487f3..9479891714234 100644 --- a/client/beefy/src/communication/notification.rs +++ b/client/beefy/src/notification.rs @@ -19,7 +19,7 @@ use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr}; use sp_runtime::traits::Block as BlockT; -use crate::justification::BeefyVersionedFinalityProof; +use crate::justification::BeefySignedCommitment; /// The sending half of the notifications channel(s) used to send /// notifications about best BEEFY block from the gadget side. @@ -31,14 +31,13 @@ pub type BeefyBestBlockStream = NotificationStream<::Hash, BeefyBestBlockTracingKey>; /// The sending half of the notifications channel(s) used to send notifications -/// about versioned finality proof generated at the end of a BEEFY round. -pub type BeefyVersionedFinalityProofSender = - NotificationSender>; +/// about signed commitments generated at the end of a BEEFY round. +pub type BeefySignedCommitmentSender = NotificationSender>; /// The receiving half of a notifications channel used to receive notifications -/// about versioned finality proof generated at the end of a BEEFY round. -pub type BeefyVersionedFinalityProofStream = - NotificationStream, BeefyVersionedFinalityProofTracingKey>; +/// about signed commitments generated at the end of a BEEFY round. +pub type BeefySignedCommitmentStream = + NotificationStream, BeefySignedCommitmentTracingKey>; /// Provides tracing key for BEEFY best block stream. #[derive(Clone)] @@ -47,9 +46,9 @@ impl TracingKeyStr for BeefyBestBlockTracingKey { const TRACING_KEY: &'static str = "mpsc_beefy_best_block_notification_stream"; } -/// Provides tracing key for BEEFY versioned finality proof stream. +/// Provides tracing key for BEEFY signed commitments stream. 
#[derive(Clone)] -pub struct BeefyVersionedFinalityProofTracingKey; -impl TracingKeyStr for BeefyVersionedFinalityProofTracingKey { - const TRACING_KEY: &'static str = "mpsc_beefy_versioned_finality_proof_notification_stream"; +pub struct BeefySignedCommitmentTracingKey; +impl TracingKeyStr for BeefySignedCommitmentTracingKey { + const TRACING_KEY: &'static str = "mpsc_beefy_signed_commitments_notification_stream"; } diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index 45d346ccd85eb..ebd85c8dea05d 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -33,7 +33,7 @@ use sp_runtime::traits::{Block, NumberFor}; /// whether the local `self` validator has voted/signed. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). -#[derive(Debug, Default)] +#[derive(Default)] struct RoundTracker { self_vote: bool, votes: HashMap, @@ -69,7 +69,6 @@ pub fn threshold(authorities: usize) -> usize { /// Only round numbers > `best_done` are of interest, all others are considered stale. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). 
-#[derive(Debug)] pub(crate) struct Rounds { rounds: BTreeMap<(Payload, NumberFor), RoundTracker>, session_start: NumberFor, @@ -110,7 +109,7 @@ where } pub(crate) fn should_self_vote(&self, round: &(P, NumberFor)) -> bool { - Some(round.1) > self.best_done && + Some(round.1.clone()) > self.best_done && self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) } @@ -136,7 +135,7 @@ where } } - pub(crate) fn should_conclude( + pub(crate) fn try_conclude( &mut self, round: &(P, NumberFor), ) -> Option>> { @@ -148,7 +147,13 @@ where trace!(target: "beefy", "🥩 Round #{} done: {}", round.1, done); if done { + // remove this and older (now stale) rounds let signatures = self.rounds.remove(round)?.votes; + self.rounds.retain(|&(_, number), _| number > round.1); + self.mandatory_done = self.mandatory_done || round.1 == self.session_start; + self.best_done = self.best_done.max(Some(round.1)); + debug!(target: "beefy", "🥩 Concluded round #{}", round.1); + Some( self.validators() .iter() @@ -160,12 +165,9 @@ where } } - pub(crate) fn conclude(&mut self, round_num: NumberFor) { - // Remove this and older (now stale) rounds. 
- self.rounds.retain(|&(_, number), _| number > round_num); - self.mandatory_done = self.mandatory_done || round_num == self.session_start; - self.best_done = self.best_done.max(Some(round_num)); - debug!(target: "beefy", "🥩 Concluded round #{}", round_num); + #[cfg(test)] + pub(crate) fn test_set_mandatory_done(&mut self, done: bool) { + self.mandatory_done = done; } } @@ -176,19 +178,9 @@ mod tests { use beefy_primitives::{crypto::Public, ValidatorSet}; - use super::{threshold, Block as BlockT, Hash, RoundTracker, Rounds}; + use super::{threshold, RoundTracker, Rounds}; use crate::keystore::tests::Keyring; - impl Rounds - where - P: Ord + Hash + Clone, - B: BlockT, - { - pub(crate) fn test_set_mandatory_done(&mut self, done: bool) { - self.mandatory_done = done; - } - } - #[test] fn round_tracker() { let mut rt = RoundTracker::default(); @@ -279,7 +271,7 @@ mod tests { true )); // round not concluded - assert!(rounds.should_conclude(&round).is_none()); + assert!(rounds.try_conclude(&round).is_none()); // self vote already present, should not self vote assert!(!rounds.should_self_vote(&round)); @@ -296,7 +288,7 @@ mod tests { (Keyring::Dave.public(), Keyring::Dave.sign(b"I am committed")), false )); - assert!(rounds.should_conclude(&round).is_none()); + assert!(rounds.try_conclude(&round).is_none()); // add 2nd good vote assert!(rounds.add_vote( @@ -305,7 +297,7 @@ mod tests { false )); // round not concluded - assert!(rounds.should_conclude(&round).is_none()); + assert!(rounds.try_conclude(&round).is_none()); // add 3rd good vote assert!(rounds.add_vote( @@ -314,8 +306,7 @@ mod tests { false )); // round concluded - assert!(rounds.should_conclude(&round).is_some()); - rounds.conclude(round.1); + assert!(rounds.try_conclude(&round).is_some()); // Eve is a validator, but round was concluded, adding vote disallowed assert!(!rounds.add_vote( @@ -433,12 +424,11 @@ mod tests { assert_eq!(3, rounds.rounds.len()); // conclude unknown round - 
assert!(rounds.should_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); + assert!(rounds.try_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); assert_eq!(3, rounds.rounds.len()); // conclude round 2 - let signatures = rounds.should_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); - rounds.conclude(2); + let signatures = rounds.try_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); assert_eq!(1, rounds.rounds.len()); assert_eq!( diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 1d5da4aaefba3..9c8f443dd1f7e 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -21,29 +21,30 @@ use futures::{future, stream::FuturesUnordered, Future, StreamExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, marker::PhantomData, sync::Arc, task::Poll}; +use std::{collections::HashMap, sync::Arc, task::Poll}; use tokio::{runtime::Runtime, time::Duration}; +use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, ImportedAux, }; +use sc_keystore::LocalKeystore; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, - PeersFullClient, TestNetFactory, + TestNetFactory, }; use sc_utils::notification::NotificationReceiver; -use sp_keystore::testing::KeyStore as TestKeystore; use beefy_primitives::{ crypto::{AuthorityId, Signature}, - mmr::MmrRootProvider, BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; -use sc_network::{config::RequestResponseConfig, ProtocolName}; -use sp_mmr_primitives::{EncodableOpaqueLeaf, Error as MmrError, MmrApi, Proof}; +use sp_mmr_primitives::{ + BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, +}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; @@ -59,21 +60,11 
@@ use sp_runtime::{ use substrate_test_runtime_client::{runtime::Header, ClientExt}; use crate::{ - beefy_block_import_and_links, - communication::request_response::{ - on_demand_justifications_protocol_config, BeefyJustifsRequestHandler, - }, - gossip_protocol_name, - justification::*, - keystore::tests::Keyring as BeefyKeyring, - BeefyRPCLinks, BeefyVoterLinks, + beefy_block_import_and_links, beefy_protocol_name, justification::*, + keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, }; -const GENESIS_HASH: H256 = H256::zero(); -fn beefy_gossip_proto_name() -> ProtocolName { - gossip_protocol_name(GENESIS_HASH, None) -} - +pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); @@ -98,12 +89,35 @@ impl BuildStorage for Genesis { } } +#[test] +fn beefy_protocol_name() { + let chain_spec = GenericChainSpec::::from_json_bytes( + &include_bytes!("../../chain-spec/res/chain_spec.json")[..], + ) + .unwrap() + .cloned_box(); + + // Create protocol name using random genesis hash. + let genesis_hash = H256::random(); + let expected = format!("/{}/beefy/1", hex::encode(genesis_hash)); + let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); + assert_eq!(proto_name.to_string(), expected); + + // Create protocol name using hardcoded genesis hash. Verify exact representation. 
+ let genesis_hash = [ + 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, 94, + 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, + ]; + let expected = + "/32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93/beefy/1".to_string(); + let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); + assert_eq!(proto_name.to_string(), expected); +} + #[derive(Default)] pub(crate) struct PeerData { pub(crate) beefy_rpc_links: Mutex>>, pub(crate) beefy_voter_links: Mutex>>, - pub(crate) beefy_justif_req_handler: - Mutex>>, } #[derive(Default)] @@ -112,37 +126,26 @@ pub(crate) struct BeefyTestNet { } impl BeefyTestNet { - pub(crate) fn new(n_authority: usize) -> Self { - let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority) }; - - for i in 0..n_authority { - let (rx, cfg) = on_demand_justifications_protocol_config(GENESIS_HASH, None); - let justif_protocol_name = cfg.name.clone(); - - net.add_authority_peer(vec![cfg]); - - let client = net.peers[i].client().as_client(); - let justif_handler = BeefyJustifsRequestHandler { - request_receiver: rx, - justif_protocol_name, - client, - _block: PhantomData, - }; - *net.peers[i].data.beefy_justif_req_handler.lock() = Some(justif_handler); + pub(crate) fn new(n_authority: usize, n_full: usize) -> Self { + let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority + n_full) }; + for _ in 0..n_authority { + net.add_authority_peer(); + } + for _ in 0..n_full { + net.add_full_peer(); } net } - pub(crate) fn add_authority_peer(&mut self, req_resp_cfgs: Vec) { + pub(crate) fn add_authority_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![beefy_gossip_proto_name()], - request_response_protocols: req_resp_cfgs, + notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], is_authority: true, ..Default::default() - }); + }) } - pub(crate) fn generate_blocks_and_sync( + pub(crate) fn generate_blocks( 
&mut self, count: usize, session_length: u64, @@ -165,7 +168,6 @@ impl BeefyTestNet { block }); - self.block_until_sync(); } } @@ -195,7 +197,6 @@ impl TestNetFactory for BeefyTestNet { let peer_data = PeerData { beefy_rpc_links: Mutex::new(Some(rpc_links)), beefy_voter_links: Mutex::new(Some(voter_links)), - ..Default::default() }; (BlockImportAdapter::new(block_import), None, peer_data) } @@ -213,8 +214,11 @@ impl TestNetFactory for BeefyTestNet { } fn add_full_peer(&mut self) { - // `add_authority_peer()` used instead. - unimplemented!() + self.add_full_peer_with_config(FullPeerConfig { + notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], + is_authority: false, + ..Default::default() + }) } } @@ -245,26 +249,41 @@ macro_rules! create_test_api { } } - impl MmrApi> for RuntimeApi { + impl MmrApi for RuntimeApi { + fn generate_proof(_leaf_index: LeafIndex) + -> Result<(EncodableOpaqueLeaf, Proof), MmrError> { + unimplemented!() + } + + fn verify_proof(_leaf: EncodableOpaqueLeaf, _proof: Proof) + -> Result<(), MmrError> { + unimplemented!() + } + + fn verify_proof_stateless( + _root: MmrRootHash, + _leaf: EncodableOpaqueLeaf, + _proof: Proof + ) -> Result<(), MmrError> { + unimplemented!() + } + fn mmr_root() -> Result { Ok($mmr_root) } - fn generate_proof( - _block_numbers: Vec, - _best_known_block_number: Option - ) -> Result<(Vec, Proof), MmrError> { + fn generate_batch_proof(_leaf_indices: Vec) -> Result<(Vec, BatchProof), MmrError> { unimplemented!() } - fn verify_proof(_leaves: Vec, _proof: Proof) -> Result<(), MmrError> { + fn verify_batch_proof(_leaves: Vec, _proof: BatchProof) -> Result<(), MmrError> { unimplemented!() } - fn verify_proof_stateless( + fn verify_batch_proof_stateless( _root: MmrRootHash, _leaves: Vec, - _proof: Proof + _proof: BatchProof ) -> Result<(), MmrError> { unimplemented!() } @@ -307,11 +326,11 @@ fn add_auth_change_digest(header: &mut Header, new_auth_set: BeefyValidatorSet) } pub(crate) fn make_beefy_ids(keys: 
&[BeefyKeyring]) -> Vec { - keys.iter().map(|&key| key.public().into()).collect() + keys.iter().map(|key| key.clone().public().into()).collect() } pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeystore::new()); + let keystore = Arc::new(LocalKeystore::in_memory()); SyncCryptoStore::ecdsa_generate_new(&*keystore, BeefyKeyType, Some(&authority.to_seed())) .expect("Creates authority key"); keystore @@ -325,9 +344,9 @@ fn initialize_beefy( ) -> impl Future where API: ProvideRuntimeApi + Default + Sync + Send, - API::Api: BeefyApi + MmrApi>, + API::Api: BeefyApi + MmrApi, { - let tasks = FuturesUnordered::new(); + let voters = FuturesUnordered::new(); for (peer_id, key, api) in peers.into_iter() { let peer = &net.peers[peer_id]; @@ -335,42 +354,31 @@ where let keystore = create_beefy_keystore(*key); let (_, _, peer_data) = net.make_block_import(peer.client().clone()); - let PeerData { beefy_rpc_links, beefy_voter_links, .. } = peer_data; + let PeerData { beefy_rpc_links, beefy_voter_links } = peer_data; let beefy_voter_links = beefy_voter_links.lock().take(); *peer.data.beefy_rpc_links.lock() = beefy_rpc_links.lock().take(); *peer.data.beefy_voter_links.lock() = beefy_voter_links.clone(); - let on_demand_justif_handler = peer.data.beefy_justif_req_handler.lock().take().unwrap(); - - let network_params = crate::BeefyNetworkParams { - network: peer.network_service().clone(), - gossip_protocol_name: beefy_gossip_proto_name(), - justifications_protocol_name: on_demand_justif_handler.protocol_name(), - _phantom: PhantomData, - }; - let payload_provider = MmrRootProvider::new(api.clone()); - let beefy_params = crate::BeefyParams { client: peer.client().as_client(), backend: peer.client().as_backend(), - payload_provider, runtime: api.clone(), key_store: Some(keystore), - network_params, + network: peer.network_service().clone(), links: beefy_voter_links.unwrap(), min_block_delta, prometheus_registry: None, - 
on_demand_justifications_handler: on_demand_justif_handler, + protocol_name: BEEFY_PROTOCOL_NAME.into(), }; - let task = crate::start_beefy_gadget::<_, _, _, _, _, _>(beefy_params); + let gadget = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} - assert_send(&task); - tasks.push(task); + assert_send(&gadget); + voters.push(gadget); } - tasks.for_each(|_| async move {}) + voters.for_each(|_| async move {}) } fn block_until(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { @@ -388,20 +396,18 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, - // peer index and key - peers: impl Iterator, -) -> (Vec>, Vec>>) -{ + peers: &[BeefyKeyring], +) -> (Vec>, Vec>>) { let mut best_block_streams = Vec::new(); - let mut versioned_finality_proof_streams = Vec::new(); - peers.for_each(|(index, _)| { - let beefy_rpc_links = net.peer(index).data.beefy_rpc_links.lock().clone().unwrap(); + let mut signed_commitment_streams = Vec::new(); + for peer_id in 0..peers.len() { + let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = beefy_rpc_links; best_block_streams.push(from_voter_best_beefy_stream.subscribe()); - versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); - }); - (best_block_streams, versioned_finality_proof_streams) + signed_commitment_streams.push(from_voter_justif_stream.subscribe()); + } + (best_block_streams, signed_commitment_streams) } fn wait_for_best_beefy_blocks( @@ -431,7 +437,7 @@ fn wait_for_best_beefy_blocks( } fn wait_for_beefy_signed_commitments( - streams: Vec>>, + streams: Vec>>, net: &Arc>, runtime: &mut Runtime, expected_commitment_block_nums: &[u64], @@ -440,12 +446,9 @@ fn wait_for_beefy_signed_commitments( let len = expected_commitment_block_nums.len(); streams.into_iter().for_each(|stream| { let mut 
expected = expected_commitment_block_nums.iter(); - wait_for.push(Box::pin(stream.take(len).for_each(move |versioned_finality_proof| { + wait_for.push(Box::pin(stream.take(len).for_each(move |signed_commitment| { let expected = expected.next(); async move { - let signed_commitment = match versioned_finality_proof { - beefy_primitives::VersionedFinalityProof::V1(sc) => sc, - }; let commitment_block_num = signed_commitment.commitment.block_number; assert_eq!(expected, Some(commitment_block_num).as_ref()); // TODO: also verify commitment payload, validator set id, and signatures. @@ -478,31 +481,29 @@ fn streams_empty_after_timeout( fn finalize_block_and_wait_for_beefy( net: &Arc>, - // peer index and key - peers: impl Iterator + Clone, + peers: &[BeefyKeyring], runtime: &mut Runtime, finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); for block in finalize_targets { - peers.clone().for_each(|(index, _)| { - let client = net.lock().peer(index).client().as_client(); - let finalize = client.expect_block_hash_from_id(&BlockId::number(*block)).unwrap(); - client.finalize_block(finalize, None).unwrap(); - }) + let finalize = BlockId::number(*block); + for i in 0..peers.len() { + net.lock().peer(i).client().as_client().finalize_block(finalize, None).unwrap(); + } } if expected_beefy.is_empty() { // run for quarter second then verify no new best beefy block available let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, runtime, timeout); - streams_empty_after_timeout(versioned_finality_proof, &net, runtime, None); + streams_empty_after_timeout(signed_commitments, &net, runtime, None); } else { // run until expected beefy blocks are received wait_for_best_beefy_blocks(best_blocks, &net, runtime, expected_beefy); - 
wait_for_beefy_signed_commitments(versioned_finality_proof, &net, runtime, expected_beefy); + wait_for_beefy_signed_commitments(signed_commitments, &net, runtime, expected_beefy); } } @@ -511,36 +512,36 @@ fn beefy_finalizing_blocks() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); + let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); let session_len = 10; let min_block_delta = 4; - let mut net = BeefyTestNet::new(2); + let mut net = BeefyTestNet::new(2, 0); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 42 blocks including `AuthorityChange` digests every 10 blocks. - net.generate_blocks_and_sync(42, session_len, &validator_set, true); + net.generate_blocks(42, session_len, &validator_set, true); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); // Minimum BEEFY block delta is 4. - let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. 
- finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[5], &[1, 5]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[5], &[1, 5]); // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[10], &[10]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[10]); // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[18], &[14, 18]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[18], &[14, 18]); // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[20], &[20]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[20], &[20]); // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[21], &[]); @@ -551,82 +552,64 @@ fn lagging_validators() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); + let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); let session_len = 30; let min_block_delta = 1; - let mut net = BeefyTestNet::new(2); + let mut net = BeefyTestNet::new(2, 0); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 62 blocks including `AuthorityChange` digests every 30 blocks. 
- net.generate_blocks_and_sync(62, session_len, &validator_set, true); + net.generate_blocks(62, session_len, &validator_set, true); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); - let peers = peers.into_iter().enumerate(); // finalize block #15 -> BEEFY should finalize #1 (mandatory) and #9, #13, #14, #15 from // diff-power-of-two rule. - finalize_block_and_wait_for_beefy( - &net, - peers.clone(), - &mut runtime, - &[15], - &[1, 9, 13, 14, 15], - ); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[15], &[1, 9, 13, 14, 15]); // Alice finalizes #25, Bob lags behind - let finalize = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(25)) - .unwrap(); - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + let finalize = BlockId::number(25); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); + streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); - wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); + wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[23, 24, 25]); // 
Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 - finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[30, 32], &[30, 31, 32]); + finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); // Verify that session-boundary votes get buffered by client and only processed once // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session // validator set). // Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - let finalize = net - .lock() - .peer(0) - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::number(60)) - .unwrap(); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); + streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); - // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 + // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); - wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); + 
wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[60]); } #[test] @@ -634,12 +617,13 @@ fn correct_beefy_payload() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); + let peers = + &[BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); let session_len = 20; let min_block_delta = 2; - let mut net = BeefyTestNet::new(4); + let mut net = BeefyTestNet::new(4, 0); // Alice, Bob, Charlie will vote on good payloads let good_api = Arc::new(four_validators::TestApi {}); @@ -655,42 +639,55 @@ fn correct_beefy_payload() { let bad_peers = vec![(3, &BeefyKeyring::Dave, bad_api)]; runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); - // push 12 blocks - net.generate_blocks_and_sync(12, session_len, &validator_set, false); + // push 10 blocks + net.generate_blocks(12, session_len, &validator_set, false); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); - let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. 
finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); - let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); + let (best_blocks, signed_commitments) = + get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); // now 2 good validators and 1 bad one are voting - let hashof11 = net - .lock() + net.lock() .peer(0) .client() .as_client() - .expect_block_hash_from_id(&BlockId::number(11)) + .finalize_block(BlockId::number(11), None) + .unwrap(); + net.lock() + .peer(1) + .client() + .as_client() + .finalize_block(BlockId::number(11), None) + .unwrap(); + net.lock() + .peer(3) + .client() + .as_client() + .finalize_block(BlockId::number(11), None) .unwrap(); - net.lock().peer(0).client().as_client().finalize_block(hashof11, None).unwrap(); - net.lock().peer(1).client().as_client().finalize_block(hashof11, None).unwrap(); - net.lock().peer(3).client().as_client().finalize_block(hashof11, None).unwrap(); // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); + streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); // 3rd good validator catches up and votes as well - let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); - net.lock().peer(2).client().as_client().finalize_block(hashof11, None).unwrap(); + let (best_blocks, signed_commitments) = + get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + net.lock() + .peer(2) + .client() + .as_client() + .finalize_block(BlockId::number(11), None) + .unwrap(); // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); - wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[11]); + 
wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[11]); } #[test] @@ -701,11 +698,11 @@ fn beefy_importing_blocks() { sp_tracing::try_init_simple(); - let mut net = BeefyTestNet::new(2); + let mut net = BeefyTestNet::new(2, 0); let client = net.peer(0).client().clone(); let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); - let PeerData { beefy_voter_links, .. } = peer_data; + let PeerData { beefy_rpc_links: _, beefy_voter_links } = peer_data; let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; let params = |block: Block, justifications: Option| { @@ -719,9 +716,9 @@ fn beefy_importing_blocks() { let full_client = client.as_client(); let parent_id = BlockId::Number(0); + let block_id = BlockId::Number(1); let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; - let hashof1 = block.header.hash(); // Import without justifications. let mut justif_recv = justif_stream.subscribe(); @@ -733,16 +730,10 @@ fn beefy_importing_blocks() { block_on(block_import.import_block(params(block, None), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); - // Verify no BEEFY justifications present: + // Verify no justifications present: { // none in backend, - assert_eq!( - full_client - .justifications(hashof1) - .unwrap() - .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), - None - ); + assert!(full_client.justifications(&block_id).unwrap().is_none()); // and none sent to BEEFY worker. 
block_on(poll_fn(move |cx| { assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending); @@ -755,14 +746,13 @@ fn beefy_importing_blocks() { let block_num = 2; let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); + let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys); let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); let encoded = versioned_proof.encode(); let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; - let hashof2 = block.header.hash(); let mut justif_recv = justif_stream.subscribe(); assert_eq!( block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(), @@ -772,18 +762,11 @@ fn beefy_importing_blocks() { ..Default::default() }), ); - // Verify BEEFY justification successfully imported: + // Verify justification successfully imported: { - // still not in backend (worker is responsible for appending to backend), - assert_eq!( - full_client - .justifications(hashof2) - .unwrap() - .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), - None - ); - // but sent to BEEFY worker - // (worker will append it to backend when all previous mandatory justifs are there as well). + // available in backend, + assert!(full_client.justifications(&BlockId::Number(block_num)).unwrap().is_some()); + // and also sent to BEEFY worker. 
block_on(poll_fn(move |cx| { match justif_recv.poll_next_unpin(cx) { Poll::Ready(Some(_justification)) => (), @@ -798,14 +781,13 @@ fn beefy_importing_blocks() { let block_num = 3; let keys = &[BeefyKeyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); + let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys); let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); let encoded = versioned_proof.encode(); let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; - let hashof3 = block.header.hash(); let mut justif_recv = justif_stream.subscribe(); assert_eq!( block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(), @@ -816,16 +798,10 @@ fn beefy_importing_blocks() { ..Default::default() }), ); - // Verify bad BEEFY justifications was not imported: + // Verify bad justifications was not imported: { // none in backend, - assert_eq!( - full_client - .justifications(hashof3) - .unwrap() - .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), - None - ); + assert!(full_client.justifications(&block_id).unwrap().is_none()); // and none sent to BEEFY worker. block_on(poll_fn(move |cx| { assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending); @@ -833,115 +809,3 @@ fn beefy_importing_blocks() { })); } } - -#[test] -fn voter_initialization() { - sp_tracing::try_init_simple(); - // Regression test for voter initialization where finality notifications were dropped - // after waiting for BEEFY pallet availability. 
- - let mut runtime = Runtime::new().unwrap(); - let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); - let session_len = 5; - // Should vote on all mandatory blocks no matter the `min_block_delta`. - let min_block_delta = 10; - - let mut net = BeefyTestNet::new(2); - let api = Arc::new(two_validators::TestApi {}); - let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); - runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); - - // push 26 blocks - net.generate_blocks_and_sync(26, session_len, &validator_set, false); - let net = Arc::new(Mutex::new(net)); - - // Finalize multiple blocks at once to get a burst of finality notifications right from start. - // Need to finalize at least one block in each session, choose randomly. - // Expect voters to pick up all of them and BEEFY-finalize the mandatory blocks of each session. - finalize_block_and_wait_for_beefy( - &net, - peers.into_iter().enumerate(), - &mut runtime, - &[1, 6, 10, 17, 24, 26], - &[1, 5, 10, 15, 20, 25], - ); -} - -#[test] -fn on_demand_beefy_justification_sync() { - sp_tracing::try_init_simple(); - - let mut runtime = Runtime::new().unwrap(); - let all_peers = - [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(&all_peers), 0).unwrap(); - let session_len = 5; - let min_block_delta = 5; - - let mut net = BeefyTestNet::new(4); - - // Alice, Bob, Charlie start first and make progress through voting. 
- let api = Arc::new(four_validators::TestApi {}); - let fast_peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; - let voting_peers = - fast_peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); - runtime.spawn(initialize_beefy(&mut net, voting_peers, min_block_delta)); - - // Dave will start late and have to catch up using on-demand justification requests (since - // in this test there is no block import queue to automatically import justifications). - let dave = vec![(3, &BeefyKeyring::Dave, api)]; - // Instantiate but don't run Dave, yet. - let dave_task = initialize_beefy(&mut net, dave, min_block_delta); - let dave_index = 3; - - // push 30 blocks - net.generate_blocks_and_sync(30, session_len, &validator_set, false); - - let fast_peers = fast_peers.into_iter().enumerate(); - let net = Arc::new(Mutex::new(net)); - // With 3 active voters and one inactive, consensus should happen and blocks BEEFY-finalized. - // Need to finalize at least one block in each session, choose randomly. - finalize_block_and_wait_for_beefy( - &net, - fast_peers.clone(), - &mut runtime, - &[1, 6, 10, 17, 24], - &[1, 5, 10, 15, 20], - ); - - // Spawn Dave, he's now way behind voting and can only catch up through on-demand justif sync. - runtime.spawn(dave_task); - // give Dave a chance to spawn and init. - run_for(Duration::from_millis(400), &net, &mut runtime); - - let (dave_best_blocks, _) = - get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); - let client = net.lock().peer(dave_index).client().as_client(); - let hashof1 = client.expect_block_hash_from_id(&BlockId::number(1)).unwrap(); - client.finalize_block(hashof1, None).unwrap(); - // Give Dave task some cpu cycles to process the finality notification, - run_for(Duration::from_millis(100), &net, &mut runtime); - // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. 
- - // Have the other peers do some gossip so Dave finds out about their progress. - finalize_block_and_wait_for_beefy(&net, fast_peers, &mut runtime, &[25], &[25]); - - // Now verify Dave successfully finalized #1 (through on-demand justification request). - wait_for_best_beefy_blocks(dave_best_blocks, &net, &mut runtime, &[1]); - - // Give Dave all tasks some cpu cycles to burn through their events queues, - run_for(Duration::from_millis(100), &net, &mut runtime); - // then verify Dave catches up through on-demand justification requests. - finalize_block_and_wait_for_beefy( - &net, - [(dave_index, BeefyKeyring::Dave)].into_iter(), - &mut runtime, - &[6, 10, 17, 24, 26], - &[5, 10, 15, 20, 25], - ); - - let all_peers = all_peers.into_iter().enumerate(); - // Now that Dave has caught up, sanity check voting works for all of them. - finalize_block_and_wait_for_beefy(&net, all_peers, &mut runtime, &[30], &[30]); -} diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 9c14128624518..3bff0822ebdb4 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -24,20 +24,14 @@ use std::{ }; use codec::{Codec, Decode, Encode}; -use futures::{stream::Fuse, FutureExt, StreamExt}; +use futures::StreamExt; use log::{debug, error, info, log_enabled, trace, warn}; -use parking_lot::Mutex; -use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; -use sc_network_common::{ - protocol::event::Event as NetEvent, - service::{NetworkEventStream, NetworkRequest}, -}; +use sc_client_api::{Backend, FinalityNotification}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; -use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::SyncOracle; use sp_mmr_primitives::MmrApi; use sp_runtime::{ @@ -48,22 +42,19 @@ use sp_runtime::{ use beefy_primitives::{ crypto::{AuthorityId, Signature}, - BeefyApi, Commitment, ConsensusLog, 
MmrRootHash, Payload, PayloadProvider, SignedCommitment, + known_payload_ids, BeefyApi, Commitment, ConsensusLog, MmrRootHash, Payload, SignedCommitment, ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, }; use crate::{ - communication::{ - gossip::{topic, GossipValidator}, - request_response::outgoing_requests_engine::OnDemandJustificationsEngine, - }, error::Error, - justification::BeefyVersionedFinalityProof, + gossip::{topic, GossipValidator}, + justification::BeefySignedCommitment, keystore::BeefyKeystore, metric_inc, metric_set, metrics::Metrics, round::Rounds, - BeefyVoterLinks, Client, KnownPeers, + BeefyVoterLinks, Client, }; enum RoundAction { @@ -121,17 +112,6 @@ impl VoterOracle { } } - /// Return current pending mandatory block, if any. - pub fn mandatory_pending(&self) -> Option> { - self.sessions.front().and_then(|round| { - if round.mandatory_done() { - None - } else { - Some(round.session_start()) - } - }) - } - /// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept. 
pub fn accepted_interval( &self, @@ -194,37 +174,29 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, - pub payload_provider: P, pub runtime: Arc, - pub network: N, + pub sync_oracle: SO, pub key_store: BeefyKeystore, - pub known_peers: Arc>>, pub gossip_engine: GossipEngine, pub gossip_validator: Arc>, - pub on_demand_justifications: OnDemandJustificationsEngine, pub links: BeefyVoterLinks, pub metrics: Option, pub min_block_delta: u32, } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities client: Arc, backend: Arc, - payload_provider: P, runtime: Arc, - network: N, + sync_oracle: SO, key_store: BeefyKeystore, - - // communication - known_peers: Arc>>, gossip_engine: GossipEngine, gossip_validator: Arc>, - on_demand_justifications: OnDemandJustificationsEngine, // channels /// Links between the block importer, the background voter and the RPC layer. @@ -240,20 +212,19 @@ pub(crate) struct BeefyWorker { /// Buffer holding votes for future processing. pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, /// Buffer holding justifications for future processing. - pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + pending_justifications: BTreeMap, Vec>>, /// Chooses which incoming votes to accept and which votes to generate. voting_oracle: VoterOracle, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, C: Client, - P: PayloadProvider, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi>, - N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, + R::Api: BeefyApi + MmrApi, + SO: SyncOracle + Send + Sync + Clone + 'static, { /// Return a new BEEFY worker instance. /// @@ -261,39 +232,32 @@ where /// BEEFY pallet has been deployed on-chain. /// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. 
- pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { client, backend, - payload_provider, runtime, key_store, - network, + sync_oracle, gossip_engine, gossip_validator, - on_demand_justifications, - known_peers, links, metrics, min_block_delta, } = worker_params; - let last_finalized_header = backend - .blockchain() - .expect_header(BlockId::number(backend.blockchain().info().finalized_number)) + let last_finalized_header = client + .expect_header(BlockId::number(client.info().finalized_number)) .expect("latest block always has header available; qed."); BeefyWorker { client: client.clone(), backend, - payload_provider, runtime, - network, - known_peers, + sync_oracle, key_store, gossip_engine, gossip_validator, - on_demand_justifications, links, metrics, best_grandpa_block_header: last_finalized_header, @@ -304,6 +268,17 @@ where } } + /// Simple wrapper that gets MMR root from header digests or from client state. + fn get_mmr_root_digest(&self, header: &B::Header) -> Option { + find_mmr_root_digest::(header).or_else(|| { + self.runtime + .runtime_api() + .mmr_root(&BlockId::hash(header.hash())) + .ok() + .and_then(|r| r.ok()) + }) + } + /// Verify `active` validator set for `block` against the key store /// /// We want to make sure that we have _at least one_ key in our keystore that @@ -338,8 +313,9 @@ where new_session_start: NumberFor, ) { debug!(target: "beefy", "🥩 New active validator set: {:?}", validator_set); + metric_set!(self, beefy_validator_set_id, validator_set.id()); - // BEEFY should finalize a mandatory block during each session. + // BEEFY should produce the mandatory block of each session. 
if let Some(active_session) = self.voting_oracle.rounds_mut() { if !active_session.mandatory_done() { debug!( @@ -358,12 +334,7 @@ where let id = validator_set.id(); self.voting_oracle.add_session(Rounds::new(new_session_start, validator_set)); - metric_set!(self, beefy_validator_set_id, id); - info!( - target: "beefy", - "🥩 New Rounds for validator set id: {:?} with session_start {:?}", - id, new_session_start - ); + info!(target: "beefy", "🥩 New Rounds for validator set id: {:?} with session_start {:?}", id, new_session_start); } fn handle_finality_notification(&mut self, notification: &FinalityNotification) { @@ -374,22 +345,11 @@ where // update best GRANDPA finalized block we have seen self.best_grandpa_block_header = header.clone(); - // Check all (newly) finalized blocks for new session(s). - let backend = self.backend.clone(); - for header in notification - .tree_route - .iter() - .map(|hash| { - backend - .blockchain() - .expect_header(BlockId::hash(*hash)) - .expect("just finalized block should be available; qed.") - }) - .chain(std::iter::once(header.clone())) - { - if let Some(new_validator_set) = find_authorities_change::(&header) { - self.init_session_at(new_validator_set, *header.number()); - } + // Check for and enqueue potential new session. + if let Some(new_validator_set) = find_authorities_change::(header) { + self.init_session_at(new_validator_set, *header.number()); + // TODO: when adding SYNC protocol, fire up a request for justification for this + // mandatory block here. } } } @@ -421,21 +381,15 @@ where /// Expects `justification` to be valid. 
fn triage_incoming_justif( &mut self, - justification: BeefyVersionedFinalityProof, + justification: BeefySignedCommitment, ) -> Result<(), Error> { - let signed_commitment = match justification { - VersionedFinalityProof::V1(ref sc) => sc, - }; - let block_num = signed_commitment.commitment.block_number; + let block_num = justification.commitment.block_number; let best_grandpa = *self.best_grandpa_block_header.number(); match self.voting_oracle.triage_round(block_num, best_grandpa)? { - RoundAction::Process => { - debug!(target: "beefy", "🥩 Process justification for round: {:?}.", block_num); - self.finalize(justification)? - }, + RoundAction::Process => self.finalize(justification), RoundAction::Enqueue => { debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num); - self.pending_justifications.entry(block_num).or_insert(justification); + self.pending_justifications.entry(block_num).or_default().push(justification) }, RoundAction::Drop => (), }; @@ -453,7 +407,7 @@ where let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if rounds.add_vote(&round, vote, self_vote) { - if let Some(signatures) = rounds.should_conclude(&round) { + if let Some(signatures) = rounds.try_conclude(&round) { self.gossip_validator.conclude_round(round.1); let block_num = round.1; @@ -463,67 +417,59 @@ where validator_set_id: rounds.validator_set_id(), }; - let finality_proof = - VersionedFinalityProof::V1(SignedCommitment { commitment, signatures }); + let signed_commitment = SignedCommitment { commitment, signatures }; metric_set!(self, beefy_round_concluded, block_num); - info!(target: "beefy", "🥩 Round #{} concluded, finality_proof: {:?}.", round.1, finality_proof); + info!(target: "beefy", "🥩 Round #{} concluded, committed: {:?}.", round.1, signed_commitment); + + if let Err(e) = self.backend.append_justification( + BlockId::Number(block_num), + ( + BEEFY_ENGINE_ID, + VersionedFinalityProof::V1(signed_commitment.clone()).encode(), + ), + ) { 
+ debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, signed_commitment); + } - // We created the `finality_proof` and know to be valid. - self.finalize(finality_proof)?; + // We created the `signed_commitment` and know to be valid. + self.finalize(signed_commitment); } } Ok(()) } - /// Provide BEEFY finality for block based on `finality_proof`: + /// Provide BEEFY finality for block based on `signed_commitment`: /// 1. Prune irrelevant past sessions from the oracle, /// 2. Set BEEFY best block, - /// 3. Send best block hash and `finality_proof` to RPC worker. + /// 3. Send best block hash and `signed_commitment` to RPC worker. /// - /// Expects `finality proof` to be valid. - fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) -> Result<(), Error> { - let block_num = match finality_proof { - VersionedFinalityProof::V1(ref sc) => sc.commitment.block_number, - }; - - // Conclude voting round for this block. - self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?.conclude(block_num); + /// Expects `signed commitment` to be valid. + fn finalize(&mut self, signed_commitment: BeefySignedCommitment) { // Prune any now "finalized" sessions from queue. self.voting_oracle.try_prune(); + let block_num = signed_commitment.commitment.block_number; if Some(block_num) > self.best_beefy_block { // Set new best BEEFY block number. 
self.best_beefy_block = Some(block_num); metric_set!(self, beefy_best_block, block_num); - self.on_demand_justifications.cancel_requests_older_than(block_num); - - if let Err(e) = self - .backend - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(block_num)) - .and_then(|hash| { - self.links - .to_rpc_best_block_sender - .notify(|| Ok::<_, ()>(hash)) - .expect("forwards closure result; the closure always returns Ok; qed."); - - self.backend - .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) - }) { - error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); - } + self.client.hash(block_num).ok().flatten().map(|hash| { + self.links + .to_rpc_best_block_sender + .notify(|| Ok::<_, ()>(hash)) + .expect("forwards closure result; the closure always returns Ok; qed.") + }); self.links .to_rpc_justif_sender - .notify(|| Ok::<_, ()>(finality_proof)) + .notify(|| Ok::<_, ()>(signed_commitment)) .expect("forwards closure result; the closure always returns Ok; qed."); } else { debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num); } - Ok(()) } /// Handle previously buffered justifications and votes that now land in the voting interval. @@ -532,10 +478,10 @@ where let _ph = PhantomData::::default(); fn to_process_for( - pending: &mut BTreeMap, T>, + pending: &mut BTreeMap, Vec>, (start, end): (NumberFor, NumberFor), _: PhantomData, - ) -> BTreeMap, T> { + ) -> BTreeMap, Vec> { // These are still pending. let still_pending = pending.split_off(&end.saturating_add(1u32.into())); // These can be processed. @@ -545,23 +491,21 @@ where // Return ones to process. to_handle } - // Interval of blocks for which we can process justifications and votes right now. - let mut interval = self.voting_oracle.accepted_interval(best_grandpa)?; // Process pending justifications. 
+ let interval = self.voting_oracle.accepted_interval(best_grandpa)?; if !self.pending_justifications.is_empty() { let justifs_to_handle = to_process_for(&mut self.pending_justifications, interval, _ph); - for (num, justification) in justifs_to_handle.into_iter() { - debug!(target: "beefy", "🥩 Handle buffered justification for: {:?}.", num); - if let Err(err) = self.finalize(justification) { - error!(target: "beefy", "🥩 Error finalizing block: {}", err); + for (num, justifications) in justifs_to_handle.into_iter() { + debug!(target: "beefy", "🥩 Handle buffered justifications for: {:?}.", num); + for justif in justifications.into_iter() { + self.finalize(justif); } } - // Possibly new interval after processing justifications. - interval = self.voting_oracle.accepted_interval(best_grandpa)?; } // Process pending votes. + let interval = self.voting_oracle.accepted_interval(best_grandpa)?; if !self.pending_votes.is_empty() { let votes_to_handle = to_process_for(&mut self.pending_votes, interval, _ph); for (num, votes) in votes_to_handle.into_iter() { @@ -600,29 +544,27 @@ where debug!(target: "beefy", "🥩 Try voting on {}", target_number); // Most of the time we get here, `target` is actually `best_grandpa`, - // avoid getting header from backend in that case. + // avoid asking `client` for header in that case. let target_header = if target_number == *self.best_grandpa_block_header.number() { self.best_grandpa_block_header.clone() } else { - self.backend - .blockchain() - .expect_header(BlockId::Number(target_number)) - .map_err(|err| { - let err_msg = format!( - "Couldn't get header for block #{:?} (error: {:?}), skipping vote..", - target_number, err - ); - Error::Backend(err_msg) - })? + self.client.expect_header(BlockId::Number(target_number)).map_err(|err| { + let err_msg = format!( + "Couldn't get header for block #{:?} (error: {:?}), skipping vote..", + target_number, err + ); + Error::Backend(err_msg) + })? 
}; let target_hash = target_header.hash(); - let payload = if let Some(hash) = self.payload_provider.payload(&target_header) { + let mmr_root = if let Some(hash) = self.get_mmr_root_digest(&target_header) { hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); return Ok(()) }; + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, mmr_root.encode()); let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if !rounds.should_self_vote(&(payload.clone(), target_number)) { @@ -678,101 +620,37 @@ where Ok(()) } - /// Initialize BEEFY voter state. - /// - /// Should be called only once during worker initialization with latest GRANDPA finalized - /// `header` and the validator set `active` at that point. - fn initialize_voter(&mut self, header: &B::Header, active: ValidatorSet) { - // just a sanity check. - if let Some(rounds) = self.voting_oracle.rounds_mut() { - error!( - target: "beefy", - "🥩 Voting session already initialized at: {:?}, validator set id {}.", - rounds.session_start(), - rounds.validator_set_id(), - ); - return - } - - self.best_grandpa_block_header = header.clone(); - if active.id() == GENESIS_AUTHORITY_SET_ID { - // When starting from genesis, there is no session boundary digest. - // Just initialize `rounds` to Block #1 as BEEFY mandatory block. - info!(target: "beefy", "🥩 Initialize voting session at genesis, block 1."); - self.init_session_at(active, 1u32.into()); - } else { - // TODO (issue #11837): persist local progress to avoid following look-up during init. - let blockchain = self.backend.blockchain(); - let mut header = header.clone(); - - // Walk back the imported blocks and initialize voter either, at the last block with - // a BEEFY justification, or at this session's boundary; voter will resume from there. 
- loop { - if let Some(true) = blockchain - .justifications(header.hash()) - .ok() - .flatten() - .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) - { - info!( - target: "beefy", - "🥩 Initialize voting session at last BEEFY finalized block: {:?}.", - *header.number() - ); - self.init_session_at(active, *header.number()); - // Mark the round as already finalized. - if let Some(round) = self.voting_oracle.rounds_mut() { - round.conclude(*header.number()); - } - self.best_beefy_block = Some(*header.number()); - break - } - - if let Some(validator_set) = find_authorities_change::(&header) { - info!( - target: "beefy", - "🥩 Initialize voting session at current session boundary: {:?}.", - *header.number() - ); - self.init_session_at(validator_set, *header.number()); - break - } - - // Move up the chain. - header = self - .client - .expect_header(BlockId::Hash(*header.parent_hash())) - // in case of db failure here we want to kill the worker - .expect("db failure, voter going down."); - } - } - } - /// Wait for BEEFY runtime pallet to be available. - /// Should be called only once during worker initialization. - async fn wait_for_runtime_pallet(&mut self, finality: &mut Fuse>) { + async fn wait_for_runtime_pallet(&mut self) { let mut gossip_engine = &mut self.gossip_engine; + let mut finality_stream = self.client.finality_notification_stream().fuse(); loop { futures::select! { - notif = finality.next() => { + notif = finality_stream.next() => { let notif = match notif { Some(notif) => notif, None => break }; let at = BlockId::hash(notif.header.hash()); - if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { - self.initialize_voter(¬if.header, active); - if !self.network.is_major_syncing() { - if let Err(err) = self.try_to_vote() { - debug!(target: "beefy", "🥩 {}", err); - } - } - // Beefy pallet available and voter initialized. 
- break - } else { - trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); - debug!(target: "beefy", "🥩 Waiting for BEEFY pallet to become available..."); + if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { + if active.id() == GENESIS_AUTHORITY_SET_ID { + // When starting from genesis, there is no session boundary digest. + // Just initialize `rounds` to Block #1 as BEEFY mandatory block. + self.init_session_at(active, 1u32.into()); + } + // In all other cases, we just go without `rounds` initialized, meaning the + // worker won't vote until it witnesses a session change. + // Once we'll implement 'initial sync' (catch-up), the worker will be able to + // start voting right away. + self.handle_finality_notification(¬if); + if let Err(err) = self.try_to_vote() { + debug!(target: "beefy", "🥩 {}", err); } + break + } else { + trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); + debug!(target: "beefy", "🥩 Waiting for BEEFY pallet to become available..."); + } }, _ = gossip_engine => { break @@ -787,15 +665,9 @@ where /// which is driven by finality notifications and gossiped votes. pub(crate) async fn run(mut self) { info!(target: "beefy", "🥩 run BEEFY worker, best grandpa: #{:?}.", self.best_grandpa_block_header.number()); - let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); - // Subscribe to finality notifications before waiting for runtime pallet and reuse stream, - // so we process notifications for all finalized blocks after pallet is available. 
- let mut finality_notifications = self.client.finality_notification_stream().fuse(); + self.wait_for_runtime_pallet().await; - self.wait_for_runtime_pallet(&mut finality_notifications).await; - trace!(target: "beefy", "🥩 BEEFY pallet available, starting voter."); - - let mut network_events = self.network.event_stream("network-gossip").fuse(); + let mut finality_notifications = self.client.finality_notification_stream().fuse(); let mut votes = Box::pin( self.gossip_engine .messages_for(topic::()) @@ -809,6 +681,7 @@ where }) .fuse(), ); + let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); loop { let mut gossip_engine = &mut self.gossip_engine; @@ -816,38 +689,15 @@ where // The branches below only change 'state', actual voting happen afterwards, // based on the new resulting 'state'. futures::select_biased! { - // Use `select_biased!` to prioritize order below. - // Make sure to pump gossip engine. - _ = gossip_engine => { - error!(target: "beefy", "🥩 Gossip engine has terminated, closing worker."); - return; - }, - // Keep track of connected peers. - net_event = network_events.next() => { - if let Some(net_event) = net_event { - self.handle_network_event(net_event); - } else { - error!(target: "beefy", "🥩 Network events stream terminated, closing worker."); - return; - } - }, - // Process finality notifications first since these drive the voter. notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); } else { - error!(target: "beefy", "🥩 Finality stream terminated, closing worker."); return; } }, - // Process incoming justifications as these can make some in-flight votes obsolete. 
- justif = self.on_demand_justifications.next().fuse() => { - if let Some(justif) = justif { - if let Err(err) = self.triage_incoming_justif(justif) { - debug!(target: "beefy", "🥩 {}", err); - } - } - }, + // TODO: when adding SYNC protocol, join the on-demand justifications stream to + // this one, and handle them both here. justif = block_import_justif.next() => { if let Some(justif) = justif { // Block import justifications have already been verified to be valid @@ -856,11 +706,9 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { - error!(target: "beefy", "🥩 Block import stream terminated, closing worker."); return; } }, - // Finally process incoming votes. vote = votes.next() => { if let Some(vote) = vote { // Votes have already been verified to be valid by the gossip validator. @@ -868,48 +716,43 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { - error!(target: "beefy", "🥩 Votes gossiping stream terminated, closing worker."); return; } }, + _ = gossip_engine => { + error!(target: "beefy", "🥩 Gossip engine has terminated."); + return; + } } - // Handle pending justifications and/or votes for now GRANDPA finalized blocks. - if let Err(err) = self.try_pending_justif_and_votes() { - debug!(target: "beefy", "🥩 {}", err); - } - - // Don't bother voting or requesting justifications during major sync. - if !self.network.is_major_syncing() { - // If the current target is a mandatory block, - // make sure there's also an on-demand justification request out for it. - if let Some(block) = self.voting_oracle.mandatory_pending() { - // This only starts new request if there isn't already an active one. - self.on_demand_justifications.request(block); + // Don't bother acting on 'state' changes during major sync. + if !self.sync_oracle.is_major_syncing() { + // Handle pending justifications and/or votes for now GRANDPA finalized blocks. 
+ if let Err(err) = self.try_pending_justif_and_votes() { + debug!(target: "beefy", "🥩 {}", err); } + // There were external events, 'state' is changed, author a vote if needed/possible. if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); } - } else { - debug!(target: "beefy", "🥩 Skipping voting while major syncing."); } } } +} - /// Update known peers based on network events. - fn handle_network_event(&mut self, event: NetEvent) { - match event { - NetEvent::SyncConnected { remote } => { - self.known_peers.lock().add_new(remote); - }, - NetEvent::SyncDisconnected { remote } => { - self.known_peers.lock().remove(&remote); - }, - // We don't care about other events. - _ => (), - } - } +/// Extract the MMR root hash from a digest in the given header, if it exists. +fn find_mmr_root_digest(header: &B::Header) -> Option +where + B: Block, +{ + let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); + + let filter = |log: ConsensusLog| match log { + ConsensusLog::MmrRoot(root) => Some(root), + _ => None, + }; + header.digest().convert_first(|l| l.try_to(id).and_then(filter)) } /// Scan the `header` digest log for a BEEFY validator set change. 
Return either the new @@ -988,47 +831,38 @@ where pub(crate) mod tests { use super::*; use crate::{ - communication::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, keystore::tests::Keyring, + notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, - BeefyPeer, BeefyTestNet, + BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, }, BeefyRPCLinks, }; - use beefy_primitives::{known_payloads, mmr::MmrRootProvider}; use futures::{executor::block_on, future::poll_fn, task::Poll}; - use sc_client_api::{Backend as BackendT, HeaderBackend}; + + use sc_client_api::HeaderBackend; use sc_network::NetworkService; use sc_network_test::{PeersFullClient, TestNetFactory}; use sp_api::HeaderT; - use sp_blockchain::Backend as BlockchainBackendT; use substrate_test_runtime_client::{ runtime::{Block, Digest, DigestItem, Header, H256}, - Backend, ClientExt, + Backend, }; fn create_beefy_worker( peer: &BeefyPeer, key: &Keyring, min_block_delta: u32, - ) -> BeefyWorker< - Block, - Backend, - PeersFullClient, - MmrRootProvider, - TestApi, - Arc>, - > { + ) -> BeefyWorker>> { let keystore = create_beefy_keystore(*key); let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefySignedCommitmentStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); - let (_, from_block_import_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + let (_, from_block_import_justif_stream) = BeefySignedCommitmentStream::::channel(); let beefy_rpc_links = BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; @@ -1042,33 +876,23 @@ pub(crate) mod tests { let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let gossip_validator = 
Arc::new(GossipValidator::new(known_peers.clone())); + let sync_oracle = network.clone(); + let gossip_validator = Arc::new(crate::gossip::GossipValidator::new()); let gossip_engine = - GossipEngine::new(network.clone(), "/beefy/1", gossip_validator.clone(), None); - let on_demand_justifications = OnDemandJustificationsEngine::new( - network.clone(), - api.clone(), - "/beefy/justifs/1".into(), - known_peers.clone(), - ); - let payload_provider = MmrRootProvider::new(api.clone()); + GossipEngine::new(network, BEEFY_PROTOCOL_NAME, gossip_validator.clone(), None); let worker_params = crate::worker::WorkerParams { client: peer.client().as_client(), backend: peer.client().as_backend(), - payload_provider, runtime: api, key_store: Some(keystore).into(), - known_peers, links, gossip_engine, gossip_validator, min_block_delta, metrics: None, - network, - on_demand_justifications, + sync_oracle, }; - BeefyWorker::<_, _, _, _, _, _>::new(worker_params) + BeefyWorker::<_, _, _, _, _>::new(worker_params) } #[test] @@ -1290,11 +1114,35 @@ pub(crate) mod tests { assert_eq!(extracted, Some(validator_set)); } + #[test] + fn extract_mmr_root_digest() { + let mut header = Header::new( + 1u32.into(), + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); + + // verify empty digest shows nothing + assert!(find_mmr_root_digest::(&header).is_none()); + + let mmr_root_hash = H256::random(); + header.digest_mut().push(DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::::MmrRoot(mmr_root_hash.clone()).encode(), + )); + + // verify validator set is correctly extracted from digest + let extracted = find_mmr_root_digest::(&header); + assert_eq!(extracted, Some(mmr_root_hash)); + } + #[test] fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); + let mut net = BeefyTestNet::new(1, 0); let mut worker = 
create_beefy_worker(&net.peer(0), &keys[0], 1); // keystore doesn't contain other keys than validators' @@ -1314,53 +1162,43 @@ pub(crate) mod tests { } #[test] - fn should_finalize_correctly() { - let keys = [Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(&keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); - let backend = net.peer(0).client().as_backend(); + fn test_finalize() { + let keys = &[Keyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let keys = keys.iter().cloned().enumerate(); - let (mut best_block_streams, mut finality_proofs) = - get_beefy_streams(&mut net, keys.clone()); + let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let mut finality_proof = finality_proofs.drain(..).next().unwrap(); + let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); - let create_finality_proof = |block_num: NumberFor| { + let create_signed_commitment = |block_num: NumberFor| { let commitment = Commitment { - payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; - VersionedFinalityProof::V1(SignedCommitment { commitment, signatures: vec![None] }) + SignedCommitment { commitment, signatures: vec![None] } }; - // no 'best beefy block' or finality proofs + // no 'best beefy block' or signed commitments assert_eq!(worker.best_beefy_block, None); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); - assert_eq!(finality_proof.poll_next_unpin(cx), Poll::Pending); + assert_eq!(signed_commitments.poll_next_unpin(cx), Poll::Pending); Poll::Ready(()) })); // unknown hash for block 
#1 - let (mut best_block_streams, mut finality_proofs) = - get_beefy_streams(&mut net, keys.clone()); + let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let mut finality_proof = finality_proofs.drain(..).next().unwrap(); - let justif = create_finality_proof(1); - // create new session at block #1 - worker.voting_oracle.add_session(Rounds::new(1, validator_set.clone())); - // try to finalize block #1 - worker.finalize(justif.clone()).unwrap(); - // verify block finalized + let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); + let justif = create_signed_commitment(1); + worker.finalize(justif.clone()); assert_eq!(worker.best_beefy_block, Some(1)); block_on(poll_fn(move |cx| { - // unknown hash -> nothing streamed assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); - // commitment streamed - match finality_proof.poll_next_unpin(cx) { + match signed_commitments.poll_next_unpin(cx) { // expect justification Poll::Ready(Some(received)) => assert_eq!(received, justif), v => panic!("unexpected value: {:?}", v), @@ -1371,22 +1209,10 @@ pub(crate) mod tests { // generate 2 blocks, try again expect success let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - net.peer(0).push_blocks(2, false); - // finalize 1 and 2 without justifications - let hashof1 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); - let hashof2 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); - backend.finalize_block(hashof1, None).unwrap(); - backend.finalize_block(hashof2, None).unwrap(); - - let justif = create_finality_proof(2); - // create new session at block #2 - worker.voting_oracle.add_session(Rounds::new(2, validator_set)); - worker.finalize(justif).unwrap(); - // verify old session pruned - 
assert_eq!(worker.voting_oracle.sessions.len(), 1); - // new session starting at #2 is in front - assert_eq!(worker.voting_oracle.rounds_mut().unwrap().session_start(), 2); - // verify block finalized + net.generate_blocks(2, 10, &validator_set, false); + + let justif = create_signed_commitment(2); + worker.finalize(justif); assert_eq!(worker.best_beefy_block, Some(2)); block_on(poll_fn(move |cx| { match best_block_stream.poll_next_unpin(cx) { @@ -1399,17 +1225,13 @@ pub(crate) mod tests { } Poll::Ready(()) })); - - // check BEEFY justifications are also appended to backend - let justifs = backend.blockchain().justifications(hashof2).unwrap().unwrap(); - assert!(justifs.get(BEEFY_ENGINE_ID).is_some()) } #[test] fn should_init_session() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); + let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); assert!(worker.voting_oracle.sessions.is_empty()); @@ -1443,14 +1265,14 @@ pub(crate) mod tests { fn should_triage_votes_and_process_later() { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); + let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); fn new_vote( block_number: NumberFor, ) -> VoteMessage, AuthorityId, Signature> { let commitment = Commitment { - payload: Payload::from_single_entry(*b"BF", vec![]), + payload: Payload::new(*b"BF", vec![]), block_number, validator_set_id: 0, }; @@ -1499,111 +1321,4 @@ pub(crate) mod tests { assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); } - - #[test] - fn should_initialize_correct_voter() { - let keys = &[Keyring::Alice]; - let validator_set = 
ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - let mut net = BeefyTestNet::new(1); - let backend = net.peer(0).client().as_backend(); - - // push 15 blocks with `AuthorityChange` digests every 10 blocks - net.generate_blocks_and_sync(15, 10, &validator_set, false); - // finalize 13 without justifications - let hashof13 = - backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); - - // Test initialization at session boundary. - { - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - - // initialize voter at block 13, expect rounds initialized at session_start = 10 - let header = backend.blockchain().header(BlockId::number(13)).unwrap().unwrap(); - worker.initialize_voter(&header, validator_set.clone()); - - // verify voter initialized with single session starting at block 10 - assert_eq!(worker.voting_oracle.sessions.len(), 1); - let rounds = worker.voting_oracle.rounds_mut().unwrap(); - assert_eq!(rounds.session_start(), 10); - assert_eq!(rounds.validator_set_id(), validator_set.id()); - - // verify next vote target is mandatory block 10 - assert_eq!(worker.best_beefy_block, None); - assert_eq!(*worker.best_grandpa_block_header.number(), 13); - assert_eq!(worker.voting_oracle.voting_target(worker.best_beefy_block, 13), Some(10)); - } - - // Test corner-case where session boundary == last beefy finalized. 
- { - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - - // import/append BEEFY justification for session boundary block 10 - let commitment = Commitment { - payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), - block_number: 10, - validator_set_id: validator_set.id(), - }; - let justif = VersionedFinalityProof::<_, Signature>::V1(SignedCommitment { - commitment, - signatures: vec![None], - }); - let hashof10 = - backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); - backend - .append_justification(hashof10, (BEEFY_ENGINE_ID, justif.encode())) - .unwrap(); - - // initialize voter at block 13, expect rounds initialized at last beefy finalized 10 - let header = backend.blockchain().header(BlockId::number(13)).unwrap().unwrap(); - worker.initialize_voter(&header, validator_set.clone()); - - // verify voter initialized with single session starting at block 10 - assert_eq!(worker.voting_oracle.sessions.len(), 1); - let rounds = worker.voting_oracle.rounds_mut().unwrap(); - assert_eq!(rounds.session_start(), 10); - assert_eq!(rounds.validator_set_id(), validator_set.id()); - - // verify next vote target is mandatory block 10 - assert_eq!(worker.best_beefy_block, Some(10)); - assert_eq!(*worker.best_grandpa_block_header.number(), 13); - assert_eq!(worker.voting_oracle.voting_target(worker.best_beefy_block, 13), Some(12)); - } - - // Test initialization at last BEEFY finalized. 
- { - let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - - // import/append BEEFY justification for block 12 - let commitment = Commitment { - payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), - block_number: 12, - validator_set_id: validator_set.id(), - }; - let justif = VersionedFinalityProof::<_, Signature>::V1(SignedCommitment { - commitment, - signatures: vec![None], - }); - let hashof12 = - backend.blockchain().expect_block_hash_from_id(&BlockId::Number(12)).unwrap(); - backend - .append_justification(hashof12, (BEEFY_ENGINE_ID, justif.encode())) - .unwrap(); - - // initialize voter at block 13, expect rounds initialized at last beefy finalized 12 - let header = backend.blockchain().header(BlockId::number(13)).unwrap().unwrap(); - worker.initialize_voter(&header, validator_set.clone()); - - // verify voter initialized with single session starting at block 12 - assert_eq!(worker.voting_oracle.sessions.len(), 1); - let rounds = worker.voting_oracle.rounds_mut().unwrap(); - assert_eq!(rounds.session_start(), 12); - assert_eq!(rounds.validator_set_id(), validator_set.id()); - - // verify next vote target is 13 - assert_eq!(worker.best_beefy_block, Some(12)); - assert_eq!(*worker.best_grandpa_block_header.number(), 13); - assert_eq!(worker.voting_oracle.voting_target(worker.best_beefy_block, 13), Some(13)); - } - } } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index b6c2ac3ba5d68..803e9c1e8bf26 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -258,11 +258,12 @@ where let proof = self.api.extract_proof(); - let state = self.backend.state_at(self.parent_hash)?; + let state = self.backend.state_at(self.block_id)?; + let parent_hash = self.parent_hash; let storage_changes = self .api - .into_storage_changes(&state, self.parent_hash) + .into_storage_changes(&state, parent_hash) .map_err(sp_blockchain::Error::StorageChanges)?; Ok(BuiltBlock { diff --git 
a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index b38dba03d6b7f..6ab559dea46fd 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -17,9 +17,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } impl-trait-for-tuples = "0.2.2" memmap2 = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.79" sc-chain-spec-derive = { version = "4.0.0-dev", path = "./derive" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-network = { version = "0.10.0-dev", path = "../network" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 2cc9f356e4df7..5aafc28524dbf 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -20,7 +20,7 @@ #![warn(missing_docs)] use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis}; -use sc_network_common::config::MultiaddrWithPeerId; +use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use serde_json as json; diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 9d2cc728b8288..73d3e1af15492 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -184,7 +184,7 @@ pub use extension::{ }; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; -use sc_network_common::config::MultiaddrWithPeerId; +use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{de::DeserializeOwned, Serialize}; use sp_core::storage::Storage; diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index f749b9b5b0c4a..ea60e4c9f87e5 100644 --- a/client/cli/Cargo.toml +++ 
b/client/cli/Cargo.toml @@ -13,20 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" chrono = "0.4.10" -clap = { version = "4.0.9", features = ["derive", "string"] } +clap = { version = "3.1.18", features = ["derive"] } fdlimit = "0.2.1" futures = "0.3.21" -libp2p = "0.49.0" +hex = "0.4.2" +libp2p = "0.46.1" log = "0.4.17" names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.0.0" rand = "0.7.3" -regex = "1.6.0" -rpassword = "7.0.0" +regex = "1.5.5" +rpassword = "5.0.0" serde = "1.0.136" -serde_json = "1.0.85" +serde_json = "1.0.79" thiserror = "1.0.30" tiny-bip39 = "0.8.2" tokio = { version = "1.17.0", features = ["signal", "rt-multi-thread", "parking_lot"] } @@ -34,7 +34,6 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../service" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } @@ -51,6 +50,6 @@ sp-version = { version = "5.0.0", path = "../../primitives/version" } tempfile = "3.1.0" [features] -default = ["rocksdb", "wasmtime"] +default = ["rocksdb"] rocksdb = ["sc-client-db/rocksdb"] wasmtime = ["sc-service/wasmtime"] diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index d761c854a6f0d..283fef985dfb9 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -16,13 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Definitions of [`ValueEnum`] types. +//! Definitions of [`ArgEnum`] types. 
-use clap::{builder::PossibleValue, ValueEnum}; +use clap::ArgEnum; /// The instantiation strategy to use in compiled mode. -#[derive(Debug, Clone, Copy, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Clone, Copy, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum WasmtimeInstantiationStrategy { /// Pool the instances to avoid initializing everything from scratch /// on each instantiation. Use copy-on-write memory when possible. @@ -59,22 +59,20 @@ pub enum WasmExecutionMethod { Compiled, } -const INTERPRETED_NAME: &str = "interpreted-i-know-what-i-do"; - -impl clap::ValueEnum for WasmExecutionMethod { - /// All possible argument values, in display order. - fn value_variants<'a>() -> &'a [Self] { - let variants = &[Self::Interpreted, Self::Compiled]; - if cfg!(feature = "wasmtime") { - variants - } else { - &variants[..1] +impl std::fmt::Display for WasmExecutionMethod { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Interpreted => write!(f, "Interpreted"), + Self::Compiled => write!(f, "Compiled"), } } +} + +impl std::str::FromStr for WasmExecutionMethod { + type Err = String; - /// Parse an argument into `Self`. - fn from_str(s: &str, _: bool) -> Result { - if s.eq_ignore_ascii_case(INTERPRETED_NAME) { + fn from_str(s: &str) -> Result { + if s.eq_ignore_ascii_case("interpreted-i-know-what-i-do") { Ok(Self::Interpreted) } else if s.eq_ignore_ascii_case("compiled") { #[cfg(feature = "wasmtime")] @@ -86,29 +84,19 @@ impl clap::ValueEnum for WasmExecutionMethod { Err("`Compiled` variant requires the `wasmtime` feature to be enabled".into()) } } else { - Err(format!("Unknown variant `{}`", s)) - } - } - - /// The canonical argument value. - /// - /// The value is `None` for skipped variants. 
- fn to_possible_value(&self) -> Option { - match self { - #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => Some(PossibleValue::new("compiled")), - #[cfg(not(feature = "wasmtime"))] - WasmExecutionMethod::Compiled => None, - WasmExecutionMethod::Interpreted => Some(PossibleValue::new(INTERPRETED_NAME)), + Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants())) } } } -impl std::fmt::Display for WasmExecutionMethod { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Interpreted => write!(f, "Interpreted"), - Self::Compiled => write!(f, "Compiled"), +impl WasmExecutionMethod { + /// Returns all the variants of this enum to be shown in the cli. + pub fn variants() -> &'static [&'static str] { + let variants = &["interpreted-i-know-what-i-do", "compiled"]; + if cfg!(feature = "wasmtime") { + variants + } else { + &variants[..1] } } } @@ -145,15 +133,15 @@ pub fn execution_method_from_cli( /// The default [`WasmExecutionMethod`]. #[cfg(feature = "wasmtime")] -pub const DEFAULT_WASM_EXECUTION_METHOD: WasmExecutionMethod = WasmExecutionMethod::Compiled; +pub const DEFAULT_WASM_EXECUTION_METHOD: &str = "compiled"; /// The default [`WasmExecutionMethod`]. #[cfg(not(feature = "wasmtime"))] -pub const DEFAULT_WASM_EXECUTION_METHOD: WasmExecutionMethod = WasmExecutionMethod::Interpreted; +pub const DEFAULT_WASM_EXECUTION_METHOD: &str = "interpreted-i-know-what-i-do"; #[allow(missing_docs)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum TracingReceiver { /// Output the tracing records using the log. Log, @@ -168,16 +156,16 @@ impl Into for TracingReceiver { } /// The type of the node key. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum NodeKeyType { /// Use ed25519. Ed25519, } /// The crypto scheme to use. -#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum CryptoScheme { /// Use ed25519. Ed25519, @@ -188,8 +176,8 @@ pub enum CryptoScheme { } /// The type of the output format. -#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum OutputType { /// Output as json. Json, @@ -198,8 +186,8 @@ pub enum OutputType { } /// How to execute blocks -#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum ExecutionStrategy { /// Execute with native build (if available, WebAssembly otherwise). Native, @@ -224,8 +212,8 @@ impl Into for ExecutionStrategy { /// Available RPC methods. #[allow(missing_docs)] -#[derive(Debug, Copy, Clone, PartialEq, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum RpcMethods { /// Expose every RPC method only when RPC is listening on `localhost`, /// otherwise serve only safe RPC methods. @@ -247,8 +235,7 @@ impl Into for RpcMethods { } /// Database backend -#[derive(Debug, Clone, PartialEq, Copy, clap::ValueEnum)] -#[value(rename_all = "lower")] +#[derive(Debug, Clone, PartialEq, Copy)] pub enum Database { /// Facebooks RocksDB #[cfg(feature = "rocksdb")] @@ -259,10 +246,29 @@ pub enum Database { /// instance of ParityDb Auto, /// ParityDb. 
- #[value(name = "paritydb-experimental")] ParityDbDeprecated, } +impl std::str::FromStr for Database { + type Err = String; + + fn from_str(s: &str) -> Result { + #[cfg(feature = "rocksdb")] + if s.eq_ignore_ascii_case("rocksdb") { + return Ok(Self::RocksDb) + } + if s.eq_ignore_ascii_case("paritydb-experimental") { + return Ok(Self::ParityDbDeprecated) + } else if s.eq_ignore_ascii_case("paritydb") { + return Ok(Self::ParityDb) + } else if s.eq_ignore_ascii_case("auto") { + Ok(Self::Auto) + } else { + Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants())) + } + } +} + impl Database { /// Returns all the variants of this enum to be shown in the cli. pub const fn variants() -> &'static [&'static str] { @@ -278,8 +284,8 @@ impl Database { /// Whether off-chain workers are enabled. #[allow(missing_docs)] -#[derive(Debug, Clone, ValueEnum)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Clone, ArgEnum)] +#[clap(rename_all = "kebab-case")] pub enum OffchainWorkerEnabled { /// Always have offchain worker enabled. Always, @@ -290,8 +296,8 @@ pub enum OffchainWorkerEnabled { } /// Syncing mode. -#[derive(Debug, Clone, Copy, ValueEnum, PartialEq)] -#[value(rename_all = "kebab-case")] +#[derive(Debug, Clone, Copy, ArgEnum, PartialEq)] +#[clap(rename_all = "kebab-case")] pub enum SyncMode { /// Full sync. Download end verify all blocks. Full, diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 5ab3ce9e88a09..3196a3e7b915f 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -34,14 +34,14 @@ use std::io::Write; #[derive(Debug, Clone, Parser)] pub struct BuildSpecCmd { /// Force raw genesis storage output. - #[arg(long)] + #[clap(long)] pub raw: bool, /// Disable adding the default bootnode to the specification. 
/// /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the /// specification when no bootnode exists. - #[arg(long)] + #[clap(long)] pub disable_default_bootnode: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index cbc22cc4d52d9..0e57d1677efbb 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ b/client/cli/src/commands/chain_info_cmd.rs @@ -73,10 +73,11 @@ impl ChainInfoCmd { B: BlockT, { let db_config = sc_client_db::DatabaseSettings { - trie_cache_maximum_size: config.trie_cache_maximum_size, + state_cache_size: config.state_cache_size, + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - blocks_pruning: config.blocks_pruning, + keep_blocks: config.keep_blocks.clone(), }; let backend = sc_service::new_db_backend::(db_config)?; let info: ChainInfo = backend.blockchain().info().into(); diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index 3e25eab2c4350..b7e69b1360a0a 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -30,13 +30,13 @@ use std::{fmt::Debug, str::FromStr, sync::Arc}; #[derive(Debug, Clone, Parser)] pub struct CheckBlockCmd { /// Block hash or number - #[arg(value_name = "HASH or NUMBER")] + #[clap(value_name = "HASH or NUMBER")] pub input: BlockNumberOrHash, /// The default number of 64KB pages to ever allocate for Wasm execution. /// /// Don't alter this unless you know what you're doing. 
- #[arg(long, value_name = "COUNT")] + #[clap(long, value_name = "COUNT")] pub default_heap_pages: Option, #[allow(missing_docs)] diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index e2f83200e511c..ff35b5a51fcad 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -32,23 +32,23 @@ use std::{fmt::Debug, fs, io, path::PathBuf, str::FromStr, sync::Arc}; #[derive(Debug, Clone, Parser)] pub struct ExportBlocksCmd { /// Output file name or stdout if unspecified. - #[arg()] + #[clap(parse(from_os_str))] pub output: Option, /// Specify starting block number. /// /// Default is 1. - #[arg(long, value_name = "BLOCK")] + #[clap(long, value_name = "BLOCK")] pub from: Option, /// Specify last block number. /// /// Default is best block. - #[arg(long, value_name = "BLOCK")] + #[clap(long, value_name = "BLOCK")] pub to: Option, /// Use binary output rather than JSON. - #[arg(long)] + #[clap(long)] pub binary: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 04bce0c1d707a..b76724caf0fef 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -23,7 +23,7 @@ use crate::{ }; use clap::Parser; use log::info; -use sc_client_api::{HeaderBackend, StorageProvider, UsageProvider}; +use sc_client_api::{StorageProvider, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{fmt::Debug, io::Write, str::FromStr, sync::Arc}; @@ -32,7 +32,7 @@ use std::{fmt::Debug, io::Write, str::FromStr, sync::Arc}; #[derive(Debug, Clone, Parser)] pub struct ExportStateCmd { /// Block hash or number. 
- #[arg(value_name = "HASH or NUMBER")] + #[clap(value_name = "HASH or NUMBER")] pub input: Option, #[allow(missing_docs)] @@ -57,7 +57,7 @@ impl ExportStateCmd { ) -> error::Result<()> where B: BlockT, - C: UsageProvider + StorageProvider + HeaderBackend, + C: UsageProvider + StorageProvider, BA: sc_client_api::backend::Backend, B::Hash: FromStr, ::Err: Debug, @@ -65,11 +65,7 @@ impl ExportStateCmd { { info!("Exporting raw state..."); let block_id = self.input.as_ref().map(|b| b.parse()).transpose()?; - let hash = match block_id { - Some(id) => client.expect_block_hash_from_id(&id)?, - None => client.usage_info().chain.best_hash, - }; - let raw_state = sc_service::chain_ops::export_raw_state(client, hash)?; + let raw_state = sc_service::chain_ops::export_raw_state(client, block_id)?; input_spec.set_storage(raw_state); info!("Generating new chain spec..."); diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs index 461cb98bc2e51..5b1b708f8669c 100644 --- a/client/cli/src/commands/generate.rs +++ b/client/cli/src/commands/generate.rs @@ -25,10 +25,10 @@ use clap::Parser; /// The `generate` command #[derive(Debug, Clone, Parser)] -#[command(name = "generate", about = "Generate a random account")] +#[clap(name = "generate", about = "Generate a random account")] pub struct GenerateCmd { /// The number of words in the phrase to generate. One of 12 (default), 15, 18, 21 and 24. 
- #[arg(short = 'w', long, value_name = "WORDS")] + #[clap(short = 'w', long, value_name = "WORDS")] words: Option, #[allow(missing_docs)] diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index e84b4a71d6d72..6b2f12531458c 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -28,7 +28,7 @@ use std::{ /// The `generate-node-key` command #[derive(Debug, Parser)] -#[command( +#[clap( name = "generate-node-key", about = "Generate a random node key, write it to a file or stdout \ and write the corresponding peer-id to stderr" @@ -37,13 +37,13 @@ pub struct GenerateNodeKeyCmd { /// Name of file to save secret key to. /// /// If not given, the secret key is printed to stdout. - #[arg(long)] + #[clap(long)] file: Option, /// The output is in raw binary format. /// /// If not given, the output is written as an hex encoded string. - #[arg(long)] + #[clap(long)] bin: bool, } @@ -57,7 +57,7 @@ impl GenerateNodeKeyCmd { let file_data = if self.bin { secret.as_ref().to_owned() } else { - array_bytes::bytes2hex("", secret.as_ref()).into_bytes() + hex::encode(secret.as_ref()).into_bytes() }; match &self.file { @@ -85,6 +85,6 @@ mod tests { assert!(generate.run().is_ok()); let mut buf = String::new(); assert!(file.read_to_string(&mut buf).is_ok()); - assert!(array_bytes::hex2bytes(&buf).is_ok()); + assert!(hex::decode(buf).is_ok()); } } diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index debc697242ddd..749824834bf7b 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -37,17 +37,17 @@ use std::{ #[derive(Debug, Parser)] pub struct ImportBlocksCmd { /// Input file or stdin if unspecified. - #[arg()] + #[clap(parse(from_os_str))] pub input: Option, /// The default number of 64KB pages to ever allocate for Wasm execution. 
/// /// Don't alter this unless you know what you're doing. - #[arg(long, value_name = "COUNT")] + #[clap(long, value_name = "COUNT")] pub default_heap_pages: Option, /// Try importing blocks from binary format rather than JSON. - #[arg(long)] + #[clap(long)] pub binary: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index 7d66a680df8c0..68201d7b4bffc 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -29,16 +29,16 @@ use std::sync::Arc; /// The `insert` command #[derive(Debug, Clone, Parser)] -#[command(name = "insert", about = "Insert a key to the keystore of a node.")] +#[clap(name = "insert", about = "Insert a key to the keystore of a node.")] pub struct InsertKeyCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. /// If not given, you will be prompted for the URI. - #[arg(long)] + #[clap(long)] suri: Option, /// Key type, examples: "gran", or "imon" - #[arg(long)] + #[clap(long)] key_type: String, #[allow(missing_docs)] @@ -50,7 +50,7 @@ pub struct InsertKeyCmd { pub keystore_params: KeystoreParams, /// The cryptography scheme that should be used to generate the key out of the given URI. - #[arg(long, value_name = "SCHEME", value_enum, ignore_case = true)] + #[clap(long, value_name = "SCHEME", arg_enum, ignore_case = true)] pub scheme: CryptoScheme, } @@ -60,7 +60,7 @@ impl InsertKeyCmd { let suri = utils::read_uri(self.suri.as_ref())?; let base_path = self .shared_params - .base_path()? 
+ .base_path() .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); let chain_id = self.shared_params.chain_id(self.shared_params.is_dev()); let chain_spec = cli.load_spec(&chain_id)?; diff --git a/client/cli/src/commands/inspect_key.rs b/client/cli/src/commands/inspect_key.rs index 369fd10926dce..14bb059503df9 100644 --- a/client/cli/src/commands/inspect_key.rs +++ b/client/cli/src/commands/inspect_key.rs @@ -27,7 +27,7 @@ use std::str::FromStr; /// The `inspect` command #[derive(Debug, Parser)] -#[command( +#[clap( name = "inspect", about = "Gets a public key and a SS58 address from the provided Secret URI" )] @@ -44,7 +44,7 @@ pub struct InspectKeyCmd { uri: Option, /// Is the given `uri` a hex encoded public key? - #[arg(long)] + #[clap(long)] public: bool, #[allow(missing_docs)] @@ -72,7 +72,7 @@ pub struct InspectKeyCmd { /// /// If there is no derivation in `--uri`, the public key will be checked against the public key /// of `--uri` directly. - #[arg(long, conflicts_with = "public")] + #[clap(long, conflicts_with = "public")] pub expect_public: Option, } @@ -127,7 +127,7 @@ fn expect_public_from_phrase( ) -> Result<(), Error> { let secret_uri = SecretUri::from_str(suri).map_err(|e| format!("{:?}", e))?; let expected_public = if let Some(public) = expect_public.strip_prefix("0x") { - let hex_public = array_bytes::hex2bytes(public) + let hex_public = hex::decode(&public) .map_err(|_| format!("Invalid expected public key hex: `{}`", expect_public))?; Pair::Public::try_from(&hex_public) .map_err(|_| format!("Invalid expected public key: `{}`", expect_public))? 
@@ -208,7 +208,7 @@ mod tests { .expect("Valid") .0 .public(); - let valid_public_hex = array_bytes::bytes2hex("0x", valid_public.as_slice()); + let valid_public_hex = format!("0x{}", hex::encode(valid_public.as_slice())); let valid_accountid = format!("{}", valid_public.into_account()); // It should fail with the invalid public key @@ -226,7 +226,7 @@ mod tests { .0 .public(); let valid_public_hex_with_password = - array_bytes::bytes2hex("0x", valid_public_with_password.as_slice()); + format!("0x{}", hex::encode(&valid_public_with_password.as_slice())); let valid_accountid_with_password = format!("{}", &valid_public_with_password.into_account()); @@ -248,7 +248,7 @@ mod tests { .0 .public(); let valid_public_hex_with_password_and_derivation = - array_bytes::bytes2hex("0x", valid_public_with_password_and_derivation.as_slice()); + format!("0x{}", hex::encode(&valid_public_with_password_and_derivation.as_slice())); // They should still be valid, because we check the base secret key. check_cmd(&seed_with_password_and_derivation, &valid_public_hex_with_password, true); diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index 9300007cb6bf2..e1617c1d085df 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -28,7 +28,7 @@ use std::{ /// The `inspect-node-key` command #[derive(Debug, Parser)] -#[command( +#[clap( name = "inspect-node-key", about = "Load a node key from a file or stdin and print the corresponding peer-id." )] @@ -36,18 +36,18 @@ pub struct InspectNodeKeyCmd { /// Name of file to read the secret key from. /// /// If not given, the secret key is read from stdin (up to EOF). - #[arg(long)] + #[clap(long)] file: Option, /// The input is in raw binary format. /// /// If not given, the input is read as an hex encoded string. - #[arg(long)] + #[clap(long)] bin: bool, /// This argument is deprecated and has no effect for this command. 
#[deprecated(note = "Network identifier is not used for node-key inspection")] - #[arg(short = 'n', long = "network", value_name = "NETWORK", ignore_case = true)] + #[clap(short = 'n', long = "network", value_name = "NETWORK", ignore_case = true)] pub network_scheme: Option, } @@ -66,8 +66,7 @@ impl InspectNodeKeyCmd { if !self.bin { // With hex input, give to the user a bit of tolerance about whitespaces let keyhex = String::from_utf8_lossy(&file_data); - file_data = array_bytes::hex2bytes(keyhex.trim()) - .map_err(|_| "failed to decode secret as hex")?; + file_data = hex::decode(keyhex.trim()).map_err(|_| "failed to decode secret as hex")?; } let secret = diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 9a3aeee50e944..b89487a18f779 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -33,7 +33,7 @@ use std::{ #[derive(Debug, Clone, Parser)] pub struct PurgeChainCmd { /// Skip interactive prompt by answering yes automatically. - #[arg(short = 'y')] + #[clap(short = 'y')] pub yes: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index 8477630cf9404..f65e348b37b89 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -31,7 +31,7 @@ use std::{fmt::Debug, str::FromStr, sync::Arc}; #[derive(Debug, Parser)] pub struct RevertCmd { /// Number of blocks to revert. - #[arg(default_value = "256")] + #[clap(default_value = "256")] pub num: GenericNumber, #[allow(missing_docs)] diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 35181d83f805f..3a74fdd9700f2 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -42,27 +42,27 @@ pub struct RunCmd { /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. 
depending on /// availability of local keys). - #[arg(long)] + #[clap(long)] pub validator: bool, /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA /// observer. - #[arg(long)] + #[clap(long)] pub no_grandpa: bool, /// Listen to all RPC interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. - #[arg(long)] + #[clap(long)] pub rpc_external: bool, /// Listen to all RPC interfaces. /// /// Same as `--rpc-external`. - #[arg(long)] + #[clap(long)] pub unsafe_rpc_external: bool, /// RPC methods to expose. @@ -71,12 +71,12 @@ pub struct RunCmd { /// - `safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. /// - `auto`: Acts as `safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is /// passed, otherwise acts as `unsafe`. - #[arg( + #[clap( long, value_name = "METHOD SET", - value_enum, + arg_enum, ignore_case = true, - default_value_t = RpcMethods::Auto, + default_value = "auto", verbatim_doc_comment )] pub rpc_methods: RpcMethods, @@ -85,61 +85,61 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. - #[arg(long)] + #[clap(long)] pub ws_external: bool, /// Listen to all Websocket interfaces. /// /// Same as `--ws-external` but doesn't warn you about it. - #[arg(long)] + #[clap(long)] pub unsafe_ws_external: bool, /// DEPRECATED, this has no affect anymore. Use `rpc_max_request_size` or /// `rpc_max_response_size` instead. 
- #[arg(long)] + #[clap(long)] pub rpc_max_payload: Option, /// Set the the maximum RPC request payload size for both HTTP and WS in megabytes. /// Default is 15MiB. - #[arg(long)] + #[clap(long)] pub rpc_max_request_size: Option, /// Set the the maximum RPC response payload size for both HTTP and WS in megabytes. /// Default is 15MiB. - #[arg(long)] + #[clap(long)] pub rpc_max_response_size: Option, /// Set the the maximum concurrent subscriptions per connection. /// Default is 1024. - #[arg(long)] + #[clap(long)] pub rpc_max_subscriptions_per_connection: Option, /// Expose Prometheus exporter on all interfaces. /// /// Default is local. - #[arg(long)] + #[clap(long)] pub prometheus_external: bool, /// DEPRECATED, IPC support has been removed. - #[arg(long, value_name = "PATH")] + #[clap(long, value_name = "PATH")] pub ipc_path: Option, /// Specify HTTP RPC server TCP port. - #[arg(long, value_name = "PORT")] + #[clap(long, value_name = "PORT")] pub rpc_port: Option, /// Specify WebSockets RPC server TCP port. - #[arg(long, value_name = "PORT")] + #[clap(long, value_name = "PORT")] pub ws_port: Option, /// Maximum number of WS RPC server connections. - #[arg(long, value_name = "COUNT")] + #[clap(long, value_name = "COUNT")] pub ws_max_connections: Option, /// DEPRECATED, this has no affect anymore. Use `rpc_max_response_size` instead. - #[arg(long)] + #[clap(long)] pub ws_max_out_buffer_capacity: Option, /// Specify browser Origins allowed to access the HTTP & WS RPC servers. @@ -148,29 +148,29 @@ pub struct RunCmd { /// value). Value of `all` will disable origin validation. Default is to /// allow localhost and origins. When running in /// --dev mode the default is to allow all origins. - #[arg(long, value_name = "ORIGINS", value_parser = parse_cors)] + #[clap(long, value_name = "ORIGINS", parse(from_str = parse_cors))] pub rpc_cors: Option, /// Specify Prometheus exporter TCP Port. 
- #[arg(long, value_name = "PORT")] + #[clap(long, value_name = "PORT")] pub prometheus_port: Option, /// Do not expose a Prometheus exporter endpoint. /// /// Prometheus metric endpoint is enabled by default. - #[arg(long)] + #[clap(long)] pub no_prometheus: bool, /// The human-readable name for this node. /// /// The node name will be reported to the telemetry server, if enabled. - #[arg(long, value_name = "NAME")] + #[clap(long, value_name = "NAME")] pub name: Option, /// Disable connecting to the Substrate telemetry server. /// /// Telemetry is on by default on global chains. - #[arg(long)] + #[clap(long)] pub no_telemetry: bool, /// The URL of the telemetry server to connect to. @@ -179,7 +179,7 @@ pub struct RunCmd { /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting /// the least verbosity. /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. - #[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)] + #[clap(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] pub telemetry_endpoints: Vec<(String, u8)>, #[allow(missing_docs)] @@ -203,40 +203,40 @@ pub struct RunCmd { pub pool_config: TransactionPoolParams, /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. - #[arg(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] + #[clap(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub alice: bool, /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. - #[arg(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] + #[clap(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to /// keystore. 
- #[arg(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] + #[clap(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. - #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] + #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] pub dave: bool, /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. - #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] + #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] pub eve: bool, /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. - #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] + #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] pub ferdie: bool, /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. - #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] + #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] pub one: bool, /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. - #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] + #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] pub two: bool, /// Enable authoring even when offline. - #[arg(long)] + #[clap(long)] pub force_authoring: bool, #[allow(missing_docs)] @@ -246,11 +246,11 @@ pub struct RunCmd { /// The size of the instances cache for each runtime. 
/// /// The default value is 8 and the values higher than 256 are ignored. - #[arg(long)] + #[clap(long)] pub max_runtime_instances: Option, /// Maximum number of different runtimes that can be cached. - #[arg(long, default_value_t = 2)] + #[clap(long, default_value = "2")] pub runtime_cache_size: u8, /// Run a temporary node. @@ -262,7 +262,7 @@ pub struct RunCmd { /// which includes: database, node key and keystore. /// /// When `--dev` is given and no explicit `--base-path`, this option is implied. - #[arg(long, conflicts_with = "base_path")] + #[clap(long, conflicts_with = "base-path")] pub tmp: bool, } @@ -485,7 +485,7 @@ impl CliConfiguration for RunCmd { Ok(if self.tmp { Some(BasePath::new_temp_dir()?) } else { - match self.shared_params().base_path()? { + match self.shared_params().base_path() { Some(r) => Some(r), // If `dev` is enabled, we use the temp base path. None if self.shared_params().is_dev() => Some(BasePath::new_temp_dir()?), @@ -597,7 +597,7 @@ impl From for Option> { } /// Parse cors origins. -fn parse_cors(s: &str) -> Result { +fn parse_cors(s: &str) -> Cors { let mut is_all = false; let mut origins = Vec::new(); for part in s.split(',') { @@ -611,9 +611,9 @@ fn parse_cors(s: &str) -> Result { } if is_all { - Ok(Cors::All) + Cors::All } else { - Ok(Cors::List(origins)) + Cors::List(origins) } } diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs index 2c3ff3a1575fd..e0a5fce353ef4 100644 --- a/client/cli/src/commands/sign.rs +++ b/client/cli/src/commands/sign.rs @@ -23,21 +23,21 @@ use sp_core::crypto::SecretString; /// The `sign` command #[derive(Debug, Clone, Parser)] -#[command(name = "sign", about = "Sign a message, with a given (secret) key")] +#[clap(name = "sign", about = "Sign a message, with a given (secret) key")] pub struct SignCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. /// If not given, you will be prompted for the URI. 
- #[arg(long)] + #[clap(long)] suri: Option, /// Message to sign, if not provided you will be prompted to /// pass the message via STDIN - #[arg(long)] + #[clap(long)] message: Option, /// The message on STDIN is hex-encoded data - #[arg(long)] + #[clap(long)] hex: bool, #[allow(missing_docs)] @@ -70,7 +70,7 @@ fn sign( message: Vec, ) -> error::Result { let pair = utils::pair_from_suri::

(suri, password)?; - Ok(array_bytes::bytes2hex("", pair.sign(&message).as_ref())) + Ok(hex::encode(pair.sign(&message))) } #[cfg(test)] diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 1ce2b23221691..32556f0ea728d 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -48,7 +48,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { uri.into() } } else { - rpassword::prompt_password("URI: ")? + rpassword::read_password_from_tty(Some("URI: "))? }; Ok(uri) @@ -203,7 +203,7 @@ where Pair: sp_core::Pair, Pair::Public: Into, { - let public = array_bytes::hex2bytes(public_str)?; + let public = decode_hex(public_str)?; let public_key = Pair::Public::try_from(&public) .map_err(|_| "Failed to construct public key from given hex")?; @@ -273,17 +273,26 @@ where format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) } +/// helper method for decoding hex +pub fn decode_hex>(message: T) -> Result, Error> { + let mut message = message.as_ref(); + if message[..2] == [b'0', b'x'] { + message = &message[2..] + } + Ok(hex::decode(message)?) 
+} + /// checks if message is Some, otherwise reads message from stdin and optionally decodes hex pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result, Error> { let mut message = vec![]; match msg { Some(m) => { - message = array_bytes::hex2bytes(m.as_str())?; + message = decode_hex(m)?; }, None => { std::io::stdin().lock().read_to_end(&mut message)?; if should_decode { - message = array_bytes::hex2bytes(array_bytes::hex_bytes2hex_str(&message)?)?; + message = decode_hex(&message)?; } }, } diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index ae0007ac7964d..6a1bf77f6c8b0 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -29,10 +29,10 @@ use utils::print_from_uri; /// The `vanity` command #[derive(Debug, Clone, Parser)] -#[command(name = "vanity", about = "Generate a seed that provides a vanity address")] +#[clap(name = "vanity", about = "Generate a seed that provides a vanity address")] pub struct VanityCmd { /// Desired pattern - #[arg(long, value_parser = assert_non_empty_string)] + #[clap(long, parse(try_from_str = assert_non_empty_string))] pattern: String, #[allow(missing_docs)] @@ -178,7 +178,7 @@ mod tests { #[test] fn test_generation_with_single_char() { let seed = generate_key::("ab", default_ss58_version()).unwrap(); - assert!(sr25519::Pair::from_seed_slice(&array_bytes::hex2bytes_unchecked(&seed)) + assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) .unwrap() .public() .to_ss58check() @@ -190,7 +190,7 @@ mod tests { let seed = generate_key::("ab", Ss58AddressFormatRegistry::PolkadotAccount.into()) .unwrap(); - assert!(sr25519::Pair::from_seed_slice(&array_bytes::hex2bytes_unchecked(&seed)) + assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) .unwrap() .public() .to_ss58check_with_version(Ss58AddressFormatRegistry::PolkadotAccount.into()) diff --git a/client/cli/src/commands/verify.rs 
b/client/cli/src/commands/verify.rs index 82554fbf268fa..b004a948a7a48 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -24,7 +24,7 @@ use sp_core::crypto::{ByteArray, Ss58Codec}; /// The `verify` command #[derive(Debug, Clone, Parser)] -#[command( +#[clap( name = "verify", about = "Verify a signature for a message, provided on STDIN, with a given (public or secret) key" )] @@ -39,11 +39,11 @@ pub struct VerifyCmd { /// Message to verify, if not provided you will be prompted to /// pass the message via STDIN - #[arg(long)] + #[clap(long)] message: Option, /// The message on STDIN is hex-encoded data - #[arg(long)] + #[clap(long)] hex: bool, #[allow(missing_docs)] @@ -55,7 +55,7 @@ impl VerifyCmd { /// Run the command pub fn run(&self) -> error::Result<()> { let message = utils::read_message(self.message.as_ref(), self.hex)?; - let sig_data = array_bytes::hex2bytes(&self.sig)?; + let sig_data = utils::decode_hex(&self.sig)?; let uri = utils::read_uri(self.uri.as_ref())?; let uri = if let Some(uri) = uri.strip_prefix("0x") { uri } else { &uri }; @@ -71,7 +71,7 @@ where let signature = Pair::Signature::try_from(&sig_data).map_err(|_| error::Error::SignatureFormatInvalid)?; - let pubkey = if let Ok(pubkey_vec) = array_bytes::hex2bytes(uri) { + let pubkey = if let Ok(pubkey_vec) = hex::decode(uri) { Pair::Public::from_slice(pubkey_vec.as_slice()) .map_err(|_| error::Error::KeyFormatInvalid)? 
} else { diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 77689708a231f..4ebbc8c72c19a 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -31,7 +31,7 @@ use sc_service::{ NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }, - BlocksPruning, ChainSpec, TracingReceiver, + ChainSpec, KeepBlocks, TracingReceiver, }; use sc_tracing::logging::LoggerBuilder; use std::{net::SocketAddr, path::PathBuf}; @@ -125,7 +125,7 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `SharedParams`. fn base_path(&self) -> Result> { - self.shared_params().base_path() + Ok(self.shared_params().base_path()) } /// Returns `true` if the node is for development or not @@ -230,12 +230,18 @@ pub trait CliConfiguration: Sized { }) } - /// Get the trie cache maximum size. + /// Get the state cache size. /// /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. - /// If `None` is returned the trie cache is disabled. - fn trie_cache_maximum_size(&self) -> Result> { - Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) + fn state_cache_size(&self) -> Result { + Ok(self.import_params().map(|x| x.state_cache_size()).unwrap_or_default()) + } + + /// Get the state cache child ratio (if any). + /// + /// By default this is `None`. + fn state_cache_child_ratio(&self) -> Result> { + Ok(Default::default()) } /// Get the state pruning mode. @@ -251,11 +257,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. /// /// By default this is retrieved from `block_pruning` if it is available. Otherwise its - /// `BlocksPruning::KeepFinalized`. - fn blocks_pruning(&self) -> Result { + /// `KeepBlocks::All`. 
+ fn keep_blocks(&self) -> Result { self.pruning_params() - .map(|x| x.blocks_pruning()) - .unwrap_or_else(|| Ok(BlocksPruning::KeepFinalized)) + .map(|x| x.keep_blocks()) + .unwrap_or_else(|| Ok(KeepBlocks::All)) } /// Get the chain ID (string). @@ -527,9 +533,10 @@ pub trait CliConfiguration: Sized { keystore_remote, keystore, database: self.database_config(&config_dir, database_cache_size, database)?, - trie_cache_maximum_size: self.trie_cache_maximum_size()?, + state_cache_size: self.state_cache_size()?, + state_cache_child_ratio: self.state_cache_child_ratio()?, state_pruning: self.state_pruning()?, - blocks_pruning: self.blocks_pruning()?, + keep_blocks: self.keep_blocks()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: self.execution_strategies(is_dev, is_validator)?, @@ -659,6 +666,17 @@ pub trait CliConfiguration: Sized { } } + if self.import_params().map_or(false, |p| { + #[allow(deprecated)] + p.unsafe_pruning + }) { + // according to https://github.com/substrate/issues/8103; + warn!( + "WARNING: \"--unsafe-pruning\" CLI-flag is deprecated and has no effect. \ + In future builds it will be removed, and providing this flag will lead to an error." + ); + } + Ok(()) } } diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index a0f843e73bf53..f38a95e0115f1 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -69,8 +69,8 @@ pub enum Error { #[error("Key storage issue encountered")] KeyStorage(#[from] sc_keystore::Error), - #[error("Invalid hexadecimal string data, {0:?}")] - HexDataConversion(array_bytes::Error), + #[error("Invalid hexadecimal string data")] + HexDataConversion(#[from] hex::FromHexError), /// Application specific error chain sequence forwarder. 
#[error(transparent)] @@ -97,9 +97,3 @@ impl From for Error { Error::InvalidUri(e) } } - -impl From for Error { - fn from(e: array_bytes::Error) -> Error { - Error::HexDataConversion(e) - } -} diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index b6d0d47e52c1f..e01befbef41a2 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -129,9 +129,9 @@ pub trait SubstrateCli: Sized { let about = Self::description(); let app = app .name(name) - .author(author) - .about(about) - .version(full_version) + .author(author.as_str()) + .about(about.as_str()) + .version(full_version.as_str()) .propagate_version(true) .args_conflicts_with_subcommands(true) .subcommand_negates_reqs(true); @@ -153,9 +153,9 @@ pub trait SubstrateCli: Sized { /// /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are /// used. It will return a [`clap::Error`], where the [`clap::Error::kind`] is a - /// [`clap::error::ErrorKind::DisplayHelp`] or [`clap::error::ErrorKind::DisplayVersion`] - /// respectively. You must call [`clap::Error::exit`] or perform a [`std::process::exit`]. - fn try_from_iter(iter: I) -> clap::error::Result + /// [`clap::ErrorKind::DisplayHelp`] or [`clap::ErrorKind::DisplayVersion`] respectively. + /// You must call [`clap::Error::exit`] or perform a [`std::process::exit`]. 
+ fn try_from_iter(iter: I) -> clap::Result where Self: Parser + Sized, I: IntoIterator, @@ -169,7 +169,11 @@ pub trait SubstrateCli: Sized { let name = Self::executable_name(); let author = Self::author(); let about = Self::description(); - let app = app.name(name).author(author).about(about).version(full_version); + let app = app + .name(name) + .author(author.as_str()) + .about(about.as_str()) + .version(full_version.as_str()); let matches = app.try_get_matches_from(iter)?; diff --git a/client/cli/src/params/database_params.rs b/client/cli/src/params/database_params.rs index fdd3622580a6d..e954b8cc3bc20 100644 --- a/client/cli/src/params/database_params.rs +++ b/client/cli/src/params/database_params.rs @@ -23,11 +23,17 @@ use clap::Args; #[derive(Debug, Clone, PartialEq, Args)] pub struct DatabaseParams { /// Select database backend to use. - #[arg(long, alias = "db", value_name = "DB", ignore_case = true, value_enum)] + #[clap( + long, + alias = "db", + value_name = "DB", + ignore_case = true, + possible_values = Database::variants(), + )] pub database: Option, /// Limit the memory the database cache can use. - #[arg(long = "db-cache", value_name = "MiB")] + #[clap(long = "db-cache", value_name = "MiB")] pub database_cache_size: Option, } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index b7ccbf1c8ed55..aef7511ffc371 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -41,13 +41,25 @@ pub struct ImportParams { #[clap(flatten)] pub database_params: DatabaseParams, + /// THIS IS A DEPRECATED CLI-ARGUMENT. + /// + /// It has been preserved in order to not break the compatibility with the existing scripts. + /// Enabling this option will lead to a runtime warning. + /// In future this option will be removed completely, thus specifying it will lead to a start + /// up error. 
+ /// + /// Details: + #[clap(long)] + #[deprecated = "According to https://github.com/paritytech/substrate/issues/8103"] + pub unsafe_pruning: bool, + /// Method for executing Wasm runtime code. - #[arg( + #[clap( long = "wasm-execution", value_name = "METHOD", - value_enum, + possible_values = WasmExecutionMethod::variants(), ignore_case = true, - default_value_t = DEFAULT_WASM_EXECUTION_METHOD, + default_value = DEFAULT_WASM_EXECUTION_METHOD, )] pub wasm_method: WasmExecutionMethod, @@ -64,18 +76,18 @@ pub struct ImportParams { /// The `legacy-instance-reuse` strategy is deprecated and will /// be removed in the future. It should only be used in case of /// issues with the default instantiation strategy. - #[arg( + #[clap( long, value_name = "STRATEGY", default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - value_enum, + arg_enum, )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Specify the path where local WASM runtimes are stored. /// /// These runtimes will override on-chain runtimes when the version matches. - #[arg(long, value_name = "PATH")] + #[clap(long, value_name = "PATH", parse(from_os_str))] pub wasm_runtime_overrides: Option, #[allow(missing_docs)] @@ -83,30 +95,14 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. - /// - /// Providing `0` will disable the cache. - #[arg(long, value_name = "Bytes", default_value_t = 67108864)] - pub trie_cache_size: usize, - - /// DEPRECATED - /// - /// Switch to `--trie-cache-size`. - #[arg(long)] - state_cache_size: Option, + #[clap(long, value_name = "Bytes", default_value = "67108864")] + pub state_cache_size: usize, } impl ImportParams { - /// Specify the trie cache maximum size. - pub fn trie_cache_maximum_size(&self) -> Option { - if self.state_cache_size.is_some() { - eprintln!("`--state-cache-size` was deprecated. 
Please switch to `--trie-cache-size`."); - } - - if self.trie_cache_size == 0 { - None - } else { - Some(self.trie_cache_size) - } + /// Specify the state cache size. + pub fn state_cache_size(&self) -> usize { + self.state_cache_size } /// Get the WASM execution method from the parameters @@ -156,39 +152,39 @@ impl ImportParams { pub struct ExecutionStrategiesParams { /// The means of execution used when calling into the runtime for importing blocks as /// part of an initial sync. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] pub execution_syncing: Option, /// The means of execution used when calling into the runtime for general block import /// (including locally authored blocks). - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] pub execution_import_block: Option, /// The means of execution used when calling into the runtime while constructing blocks. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] pub execution_block_construction: Option, /// The means of execution used when calling into the runtime while using an off-chain worker. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] pub execution_offchain_worker: Option, /// The means of execution used when calling into the runtime while not syncing, importing or /// constructing blocks. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] pub execution_other: Option, /// The execution strategy that should be used by all execution contexts. 
- #[arg( + #[clap( long, value_name = "STRATEGY", - value_enum, + arg_enum, ignore_case = true, conflicts_with_all = &[ - "execution_other", - "execution_offchain_worker", - "execution_block_construction", - "execution_import_block", - "execution_syncing", + "execution-other", + "execution-offchain-worker", + "execution-block-construction", + "execution-import-block", + "execution-syncing", ] )] pub execution: Option, diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index d6c3c35d82418..46403f95fbc4b 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -32,31 +32,32 @@ const DEFAULT_KEYSTORE_CONFIG_PATH: &str = "keystore"; #[derive(Debug, Clone, Args)] pub struct KeystoreParams { /// Specify custom URIs to connect to for keystore-services - #[arg(long)] + #[clap(long)] pub keystore_uri: Option, /// Specify custom keystore path. - #[arg(long, value_name = "PATH")] + #[clap(long, value_name = "PATH", parse(from_os_str))] pub keystore_path: Option, /// Use interactive shell for entering the password used by the keystore. - #[arg(long, conflicts_with_all = &["password", "password_filename"])] + #[clap(long, conflicts_with_all = &["password", "password-filename"])] pub password_interactive: bool, /// Password used by the keystore. This allows appending an extra user-defined secret to the /// seed. - #[arg( + #[clap( long, - value_parser = secret_string_from_str, - conflicts_with_all = &["password_interactive", "password_filename"] + parse(try_from_str = secret_string_from_str), + conflicts_with_all = &["password-interactive", "password-filename"] )] pub password: Option, /// File that contains the password used by the keystore. 
- #[arg( + #[clap( long, value_name = "PATH", - conflicts_with_all = &["password_interactive", "password"] + parse(from_os_str), + conflicts_with_all = &["password-interactive", "password"] )] pub password_filename: Option, } @@ -93,7 +94,7 @@ impl KeystoreParams { let (password_interactive, password) = (self.password_interactive, self.password.clone()); let pass = if password_interactive { - let password = rpassword::prompt_password("Key password: ")?; + let password = rpassword::read_password_from_tty(Some("Key password: "))?; Some(SecretString::new(password)) } else { password @@ -104,5 +105,6 @@ impl KeystoreParams { } fn input_keystore_password() -> Result { - rpassword::prompt_password("Keystore password: ").map_err(|e| format!("{:?}", e).into()) + rpassword::read_password_from_tty(Some("Keystore password: ")) + .map_err(|e| format!("{:?}", e).into()) } diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 3197deb101bcc..9fccce606b4e4 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -27,7 +27,7 @@ mod transaction_pool_params; use crate::arg_enums::{CryptoScheme, OutputType}; use clap::Args; -use sp_core::crypto::{Ss58AddressFormat, Ss58AddressFormatRegistry}; +use sp_core::crypto::Ss58AddressFormat; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, @@ -40,17 +40,6 @@ pub use crate::params::{ transaction_pool_params::*, }; -/// Parse Ss58AddressFormat -pub fn parse_ss58_address_format(x: &str) -> Result { - match Ss58AddressFormatRegistry::try_from(x) { - Ok(format_registry) => Ok(format_registry.into()), - Err(_) => Err(format!( - "Unable to parse variant. Known variants: {:?}", - Ss58AddressFormat::all_names() - )), - } -} - /// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a /// decimal. 
#[derive(Debug, Clone)] @@ -129,7 +118,7 @@ impl BlockNumberOrHash { #[derive(Debug, Clone, Args)] pub struct CryptoSchemeFlag { /// cryptography scheme - #[arg(long, value_name = "SCHEME", value_enum, ignore_case = true, default_value_t = CryptoScheme::Sr25519)] + #[clap(long, value_name = "SCHEME", arg_enum, ignore_case = true, default_value = "sr25519")] pub scheme: CryptoScheme, } @@ -137,7 +126,7 @@ pub struct CryptoSchemeFlag { #[derive(Debug, Clone, Args)] pub struct OutputTypeFlag { /// output format - #[arg(long, value_name = "FORMAT", value_enum, ignore_case = true, default_value_t = OutputType::Text)] + #[clap(long, value_name = "FORMAT", arg_enum, ignore_case = true, default_value = "text")] pub output_type: OutputType, } @@ -145,12 +134,13 @@ pub struct OutputTypeFlag { #[derive(Debug, Clone, Args)] pub struct NetworkSchemeFlag { /// network address format - #[arg( + #[clap( short = 'n', long, value_name = "NETWORK", + possible_values = &Ss58AddressFormat::all_names()[..], ignore_case = true, - value_parser = parse_ss58_address_format, + parse(try_from_str = Ss58AddressFormat::try_from), )] pub network: Option, } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 5580dea45bde6..74c2db92c3215 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -19,10 +19,11 @@ use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use clap::Args; use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig}, + config::{ + NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig, + }, multiaddr::Protocol, }; -use sc_network_common::config::{NonReservedPeerMode, SetConfig, TransportConfig}; use sc_service::{ config::{Multiaddr, MultiaddrWithPeerId}, ChainSpec, ChainType, @@ -33,11 +34,11 @@ use std::{borrow::Cow, path::PathBuf}; #[derive(Debug, Clone, Args)] pub struct NetworkParams { /// Specify a list of bootnodes. 
- #[arg(long, value_name = "ADDR", num_args = 1..)] + #[clap(long, value_name = "ADDR", multiple_values(true))] pub bootnodes: Vec, /// Specify a list of reserved node addresses. - #[arg(long, value_name = "ADDR", num_args = 1..)] + #[clap(long, value_name = "ADDR", multiple_values(true))] pub reserved_nodes: Vec, /// Whether to only synchronize the chain with reserved nodes. @@ -48,12 +49,12 @@ pub struct NetworkParams { /// In particular, if you are a validator your node might still connect to other /// validator nodes and collator nodes regardless of whether they are defined as /// reserved nodes. - #[arg(long)] + #[clap(long)] pub reserved_only: bool, /// The public address that other nodes will use to connect to it. /// This can be used if there's a proxy in front of this node. - #[arg(long, value_name = "PUBLIC_ADDR", num_args = 1..)] + #[clap(long, value_name = "PUBLIC_ADDR", multiple_values(true))] pub public_addr: Vec, /// Listen on this multiaddress. @@ -61,50 +62,49 @@ pub struct NetworkParams { /// By default: /// If `--validator` is passed: `/ip4/0.0.0.0/tcp/` and `/ip6/[::]/tcp/`. /// Otherwise: `/ip4/0.0.0.0/tcp//ws` and `/ip6/[::]/tcp//ws`. - #[arg(long, value_name = "LISTEN_ADDR", num_args = 1..)] + #[clap(long, value_name = "LISTEN_ADDR", multiple_values(true))] pub listen_addr: Vec, /// Specify p2p protocol TCP port. - #[arg(long, value_name = "PORT", conflicts_with_all = &[ "listen_addr" ])] + #[clap(long, value_name = "PORT", conflicts_with_all = &[ "listen-addr" ])] pub port: Option, /// Always forbid connecting to private IPv4 addresses (as specified in /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with /// `--reserved-nodes` or `--bootnodes`. Enabled by default for chains marked as "live" in /// their chain specifications. 
- #[arg(long, conflicts_with_all = &["allow_private_ipv4"])] + #[clap(long, conflicts_with_all = &["allow-private-ipv4"])] pub no_private_ipv4: bool, /// Always accept connecting to private IPv4 addresses (as specified in /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Enabled by default for chains marked as /// "local" in their chain specifications, or when `--dev` is passed. - #[arg(long, conflicts_with_all = &["no_private_ipv4"])] + #[clap(long, conflicts_with_all = &["no-private-ipv4"])] pub allow_private_ipv4: bool, /// Specify the number of outgoing connections we're trying to maintain. - #[arg(long, value_name = "COUNT", default_value_t = 15)] + #[clap(long, value_name = "COUNT", default_value = "25")] pub out_peers: u32, /// Maximum number of inbound full nodes peers. - #[arg(long, value_name = "COUNT", default_value_t = 25)] + #[clap(long, value_name = "COUNT", default_value = "25")] pub in_peers: u32, - /// Maximum number of inbound light nodes peers. - #[arg(long, value_name = "COUNT", default_value_t = 100)] + #[clap(long, value_name = "COUNT", default_value = "100")] pub in_peers_light: u32, /// Disable mDNS discovery. /// /// By default, the network will use mDNS to discover other nodes on the /// local network. This disables it. Automatically implied when using --dev. - #[arg(long)] + #[clap(long)] pub no_mdns: bool, /// Maximum number of peers from which to ask for the same blocks in parallel. /// /// This allows downloading announced blocks from multiple peers. Decrease to save /// traffic and risk increased latency. - #[arg(long, value_name = "COUNT", default_value_t = 5)] + #[clap(long, value_name = "COUNT", default_value = "5")] pub max_parallel_downloads: u32, #[allow(missing_docs)] @@ -115,7 +115,7 @@ pub struct NetworkParams { /// /// By default this option is `true` for `--dev` or when the chain type is /// `Local`/`Development` and false otherwise. 
- #[arg(long)] + #[clap(long)] pub discover_local: bool, /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in @@ -123,11 +123,11 @@ pub struct NetworkParams { /// /// See the S/Kademlia paper for more information on the high level design as well as its /// security improvements. - #[arg(long)] + #[clap(long)] pub kademlia_disjoint_query_paths: bool, /// Join the IPFS network and serve transactions over bitswap protocol. - #[arg(long)] + #[clap(long)] pub ipfs_server: bool, /// Blockchain syncing mode. @@ -136,11 +136,11 @@ pub struct NetworkParams { /// - `fast`: Download blocks and the latest state only. /// - `fast-unsafe`: Same as `fast`, but skip downloading state proofs. /// - `warp`: Download the latest state and proof. - #[arg( + #[clap( long, - value_enum, + arg_enum, value_name = "SYNC_MODE", - default_value_t = SyncMode::Full, + default_value = "full", ignore_case = true, verbatim_doc_comment )] diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 2346455c26a37..d51b6143ed393 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -46,7 +46,7 @@ pub struct NodeKeyParams { /// WARNING: Secrets provided as command-line arguments are easily exposed. /// Use of this option should be limited to development and testing. To use /// an externally managed secret key, use `--node-key-file` instead. - #[arg(long, value_name = "KEY")] + #[clap(long, value_name = "KEY")] pub node_key: Option, /// The type of secret key to use for libp2p networking. @@ -66,7 +66,7 @@ pub struct NodeKeyParams { /// /// The node's secret key determines the corresponding public key and hence the /// node's peer ID in the context of libp2p. 
- #[arg(long, value_name = "TYPE", value_enum, ignore_case = true, default_value_t = NodeKeyType::Ed25519)] + #[clap(long, value_name = "TYPE", arg_enum, ignore_case = true, default_value = "ed25519")] pub node_key_type: NodeKeyType, /// The file from which to read the node's secret key to use for libp2p networking. @@ -79,7 +79,7 @@ pub struct NodeKeyParams { /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. - #[arg(long, value_name = "FILE")] + #[clap(long, value_name = "FILE")] pub node_key_file: Option, } @@ -122,7 +122,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result, - /// Specify the blocks pruning mode, a number of blocks to keep or 'archive'. + #[clap(long, value_name = "PRUNING_MODE")] + pub pruning: Option, + /// Specify the number of finalized blocks to keep in the database. /// - /// Default is to keep all finalized blocks. - /// otherwise, all blocks can be kept (i.e 'archive'), - /// or for all canonical blocks (i.e 'archive-canonical'), - /// or for the last N blocks (i.e a number). + /// Default is to keep all blocks. /// /// NOTE: only finalized blocks are subject for removal! 
- #[arg(alias = "keep-blocks", long, value_name = "COUNT")] - pub blocks_pruning: Option, + #[clap(long, value_name = "COUNT")] + pub keep_blocks: Option, } impl PruningParams { /// Get the pruning value from the parameters pub fn state_pruning(&self) -> error::Result> { - self.state_pruning + self.pruning .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), - "archive-canonical" => Ok(PruningMode::ArchiveCanonical), bc => bc .parse() - .map_err(|_| { - error::Error::Input("Invalid state pruning mode specified".to_string()) - }) - .map(PruningMode::blocks_pruning), + .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) + .map(PruningMode::keep_blocks), }) .transpose() } /// Get the block pruning value from the parameters - pub fn blocks_pruning(&self) -> error::Result { - match self.blocks_pruning.as_ref() { - Some(bp) => match bp.as_str() { - "archive" => Ok(BlocksPruning::KeepAll), - "archive-canonical" => Ok(BlocksPruning::KeepFinalized), - bc => bc - .parse() - .map_err(|_| { - error::Error::Input("Invalid blocks pruning mode specified".to_string()) - }) - .map(BlocksPruning::Some), - }, - None => Ok(BlocksPruning::KeepFinalized), - } + pub fn keep_blocks(&self) -> error::Result { + Ok(match self.keep_blocks { + Some(n) => KeepBlocks::Some(n), + None => KeepBlocks::All, + }) } } diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 6c03ac2c4ec23..67b18aa8b09e2 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -28,25 +28,25 @@ pub struct SharedParams { /// /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file /// with the chainspec (such as one exported by the `build-spec` subcommand). - #[arg(long, value_name = "CHAIN_SPEC")] + #[clap(long, value_name = "CHAIN_SPEC")] pub chain: Option, /// Specify the development chain. 
/// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. - #[arg(long, conflicts_with_all = &["chain"])] + #[clap(long, conflicts_with_all = &["chain"])] pub dev: bool, /// Specify custom base path. - #[arg(long, short = 'd', value_name = "PATH")] + #[clap(long, short = 'd', value_name = "PATH", parse(from_os_str))] pub base_path: Option, /// Sets a custom logging filter. Syntax is =, e.g. -lsync=debug. /// /// Log levels (least to most verbose) are error, warn, info, debug, and trace. /// By default, all targets log `info`. The global log level can be set with -l. - #[arg(short = 'l', long, value_name = "LOG_PATTERN", num_args = 1..)] + #[clap(short = 'l', long, value_name = "LOG_PATTERN", multiple_values(true))] pub log: Vec, /// Enable detailed log output. @@ -54,11 +54,11 @@ pub struct SharedParams { /// This includes displaying the log target, log level and thread name. /// /// This is automatically enabled when something is logged with any higher level than `info`. - #[arg(long)] + #[clap(long)] pub detailed_log_output: bool, /// Disable log color output. - #[arg(long)] + #[clap(long)] pub disable_log_color: bool, /// Enable feature to dynamically update and reload the log filter. @@ -68,27 +68,22 @@ pub struct SharedParams { /// /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this /// option not being set. - #[arg(long)] + #[clap(long)] pub enable_log_reloading: bool, /// Sets a custom profiling filter. Syntax is the same as for logging: = - #[arg(long, value_name = "TARGETS")] + #[clap(long, value_name = "TARGETS")] pub tracing_targets: Option, /// Receiver to process tracing messages. 
- #[arg(long, value_name = "RECEIVER", value_enum, ignore_case = true, default_value_t = TracingReceiver::Log)] + #[clap(long, value_name = "RECEIVER", arg_enum, ignore_case = true, default_value = "log")] pub tracing_receiver: TracingReceiver, } impl SharedParams { /// Specify custom base path. - pub fn base_path(&self) -> Result, crate::Error> { - match &self.base_path { - Some(r) => Ok(Some(r.clone().into())), - // If `dev` is enabled, we use the temp base path. - None if self.is_dev() => Ok(Some(BasePath::new_temp_dir()?)), - None => Ok(None), - } + pub fn base_path(&self) -> Option { + self.base_path.clone().map(Into::into) } /// Specify the development chain. diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index 6b3a2d8a97a01..6429dfec3f908 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -23,15 +23,15 @@ use sc_service::config::TransactionPoolOptions; #[derive(Debug, Clone, Args)] pub struct TransactionPoolParams { /// Maximum number of transactions in the transaction pool. - #[arg(long, value_name = "COUNT", default_value_t = 8192)] + #[clap(long, value_name = "COUNT", default_value = "8192")] pub pool_limit: usize, /// Maximum number of kilobytes of all transactions stored in the pool. - #[arg(long, value_name = "COUNT", default_value_t = 20480)] + #[clap(long, value_name = "COUNT", default_value = "20480")] pub pool_kbytes: usize, /// How long a transaction is banned for, if it is considered invalid. Defaults to 1800s. 
- #[arg(long, value_name = "SECONDS")] + #[clap(long, value_name = "SECONDS")] pub tx_ban_seconds: Option, } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 3fe9891e9a7ba..69499fa346e31 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" @@ -37,7 +37,7 @@ sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -parking_lot = "0.12.1" +parking_lot = "0.12.0" tempfile = "3.1.0" sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network = { version = "0.10.0-dev", path = "../../network" } diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index b17feae45897e..30554006732c0 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -18,9 +18,7 @@ //! Module implementing the logic for verifying and importing AuRa blocks. 
-use crate::{ - aura_err, authorities, find_pre_digest, slot_author, AuthorityId, CompatibilityMode, Error, -}; +use crate::{aura_err, authorities, find_pre_digest, slot_author, AuthorityId, Error}; use codec::{Codec, Decode, Encode}; use log::{debug, info, trace}; use prometheus_endpoint::Registry; @@ -33,15 +31,21 @@ use sc_consensus_slots::{check_equivocation, CheckedHeader, InherentDataProvider use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend}; -use sp_consensus::Error as ConsensusError; -use sp_consensus_aura::{digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi}; +use sp_blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + HeaderBackend, +}; +use sp_consensus::{CanAuthorWith, Error as ConsensusError}; +use sp_consensus_aura::{ + digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, ConsensusLog, + AURA_ENGINE_ID, +}; use sp_consensus_slots::Slot; use sp_core::{crypto::Pair, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header, NumberFor}, + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, Header}, DigestItem, }; use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; @@ -105,37 +109,38 @@ where } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData

, create_inherent_data_providers: CIDP, + can_author_with: CAW, check_for_equivocation: CheckForEquivocation, telemetry: Option, - compatibility_mode: CompatibilityMode, } -impl AuraVerifier { +impl AuraVerifier { pub(crate) fn new( client: Arc, create_inherent_data_providers: CIDP, + can_author_with: CAW, check_for_equivocation: CheckForEquivocation, telemetry: Option, - compatibility_mode: CompatibilityMode, ) -> Self { Self { client, create_inherent_data_providers, + can_author_with, check_for_equivocation, telemetry, - compatibility_mode, phantom: PhantomData, } } } -impl AuraVerifier +impl AuraVerifier where P: Send + Sync + 'static, + CAW: Send + Sync + 'static, CIDP: Send, { async fn check_inherents( @@ -149,8 +154,19 @@ where where C: ProvideRuntimeApi, C::Api: BlockBuilderApi, + CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "aura", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self .client .runtime_api() @@ -171,13 +187,14 @@ where } #[async_trait::async_trait] -impl Verifier for AuraVerifier> +impl Verifier for AuraVerifier where - C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore, + C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore + BlockOf, C::Api: BlockBuilderApi + AuraApi> + ApiExt, P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, + CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -187,13 +204,8 @@ where ) -> Result<(BlockImportParams, Option)>>), String> { let hash = block.header.hash(); let parent_hash = *block.header.parent_hash(); - let authorities = authorities( - self.client.as_ref(), - parent_hash, - 
*block.header.number(), - &self.compatibility_mode, - ) - .map_err(|e| format!("Could not fetch authorities at {:?}: {}", parent_hash, e))?; + let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) + .map_err(|e| format!("Could not fetch authorities at {:?}: {}", parent_hash, e))?; let create_inherent_data_providers = self .create_inherent_data_providers @@ -229,16 +241,15 @@ where inherent_data.aura_replace_inherent_data(slot); - // skip the inherents verification if the runtime API is old or not expected to - // exist. - if !block.state_action.skip_execution_checks() && - self.client - .runtime_api() - .has_api_with::, _>( - &BlockId::Hash(parent_hash), - |v| v >= 2, - ) - .map_err(|e| e.to_string())? + // skip the inherents verification if the runtime API is old. + if self + .client + .runtime_api() + .has_api_with::, _>( + &BlockId::Hash(parent_hash), + |v| v >= 2, + ) + .map_err(|e| e.to_string())? { self.check_inherents( new_block.clone(), @@ -263,12 +274,28 @@ where "pre_header" => ?pre_header, ); + // Look for an authorities-change log. + let maybe_keys = pre_header + .digest() + .logs() + .iter() + .filter_map(|l| { + l.try_to::>>(OpaqueDigestItemId::Consensus( + &AURA_ENGINE_ID, + )) + }) + .find_map(|l| match l { + ConsensusLog::AuthoritiesChange(a) => + Some(vec![(well_known_cache_keys::AUTHORITIES, a.encode())]), + _ => None, + }); + block.header = pre_header; block.post_digests.push(seal); block.fork_choice = Some(ForkChoiceStrategy::LongestChain); block.post_hash = Some(hash); - Ok((block, None)) + Ok((block, maybe_keys)) }, CheckedHeader::Deferred(a, b) => { debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); @@ -311,7 +338,7 @@ impl Default for CheckForEquivocation { } /// Parameters of [`import_queue`]. -pub struct ImportQueueParams<'a, Block: BlockT, I, C, S, CIDP> { +pub struct ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> { /// The block import to use. 
pub block_import: I, /// The justification import. @@ -324,18 +351,16 @@ pub struct ImportQueueParams<'a, Block: BlockT, I, C, S, CIDP> { pub spawner: &'a S, /// The prometheus registry. pub registry: Option<&'a Registry>, + /// Can we author with the current node? + pub can_author_with: CAW, /// Should we check for equivocation? pub check_for_equivocation: CheckForEquivocation, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, - /// Compatibility mode that should be used. - /// - /// If in doubt, use `Default::default()`. - pub compatibility_mode: CompatibilityMode>, } /// Start an import queue for the Aura consensus algorithm. -pub fn import_queue( +pub fn import_queue( ImportQueueParams { block_import, justification_import, @@ -343,10 +368,10 @@ pub fn import_queue( create_inherent_data_providers, spawner, registry, + can_author_with, check_for_equivocation, telemetry, - compatibility_mode, - }: ImportQueueParams, + }: ImportQueueParams, ) -> Result, sp_consensus::Error> where Block: BlockT, @@ -367,51 +392,50 @@ where P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, S: sp_core::traits::SpawnEssentialNamed, + CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { let verifier = build_verifier::(BuildVerifierParams { client, create_inherent_data_providers, + can_author_with, check_for_equivocation, telemetry, - compatibility_mode, }); Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } /// Parameters of [`build_verifier`]. -pub struct BuildVerifierParams { +pub struct BuildVerifierParams { /// The client to interact with the chain. pub client: Arc, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, + /// Can we author with the current node? 
+ pub can_author_with: CAW, /// Should we check for equivocation? pub check_for_equivocation: CheckForEquivocation, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, - /// Compatibility mode that should be used. - /// - /// If in doubt, use `Default::default()`. - pub compatibility_mode: CompatibilityMode, } /// Build the [`AuraVerifier`] -pub fn build_verifier( +pub fn build_verifier( BuildVerifierParams { client, create_inherent_data_providers, + can_author_with, check_for_equivocation, telemetry, - compatibility_mode, - }: BuildVerifierParams, -) -> AuraVerifier { + }: BuildVerifierParams, +) -> AuraVerifier { AuraVerifier::<_, P, _, _>::new( client, create_inherent_data_providers, + can_author_with, check_for_equivocation, telemetry, - compatibility_mode, ) } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 50a02726cf56a..ee8be727dcdac 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -44,10 +44,12 @@ use sc_consensus_slots::{ SlotInfo, StorageChanges, }; use sc_telemetry::TelemetryHandle; -use sp_api::{Core, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_application_crypto::{AppKey, AppPublic}; use sp_blockchain::{HeaderBackend, Result as CResult}; -use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; +use sp_consensus::{ + BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, +}; use sp_consensus_slots::Slot; use sp_core::crypto::{ByteArray, Pair, Public}; use sp_inherents::CreateInherentDataProviders; @@ -74,43 +76,6 @@ pub use sp_consensus_aura::{ type AuthorityId

=

::Public; -/// Run `AURA` in a compatibility mode. -/// -/// This is required for when the chain was launched and later there -/// was a consensus breaking change. -#[derive(Debug, Clone)] -pub enum CompatibilityMode { - /// Don't use any compatibility mode. - None, - /// Call `initialize_block` before doing any runtime calls. - /// - /// Previously the node would execute `initialize_block` before fetchting the authorities - /// from the runtime. This behaviour changed in: - /// - /// By calling `initialize_block` before fetching the authorities, on a block that - /// would enact a new validator set, the block would already be build/sealed by an - /// authority of the new set. With this mode disabled (the default) a block that enacts a new - /// set isn't sealed/built by an authority of the new set, however to make new nodes be able to - /// sync old chains this compatibility mode exists. - UseInitializeBlock { - /// The block number until this compatibility mode should be executed. The first runtime - /// call in the context of the `until` block (importing it/building it) will disable the - /// compatibility mode (i.e. at `until` the default rules will apply). When enabling this - /// compatibility mode the `until` block should be a future block on which all nodes will - /// have upgraded to a release that includes the updated compatibility mode configuration. - /// At `until` block there will be a hard fork when the authority set changes, between the - /// old nodes (running with `initialize_block`, i.e. without the compatibility mode - /// configuration) and the new nodes. - until: N, - }, -} - -impl Default for CompatibilityMode { - fn default() -> Self { - Self::None - } -} - /// Get the slot duration for Aura. pub fn slot_duration(client: &C) -> CResult where @@ -143,7 +108,7 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&A } /// Parameters of [`start_aura`]. -pub struct StartAuraParams { +pub struct StartAuraParams { /// The duration of a slot. pub slot_duration: SlotDuration, /// The client to interact with the chain. @@ -166,6 +131,8 @@ pub struct StartAuraParams { pub backoff_authoring_blocks: Option, /// The keystore used by the node. pub keystore: SyncCryptoStorePtr, + /// Can we author a block with this node? + pub can_author_with: CAW, /// The proportion of the slot dedicated to proposing. /// /// The block proposing will be limited to this proportion of the slot from the starting of the @@ -177,14 +144,10 @@ pub struct StartAuraParams { pub max_block_proposal_slot_portion: Option, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, - /// Compatibility mode that should be used. - /// - /// If in doubt, use `Default::default()`. - pub compatibility_mode: CompatibilityMode, } /// Start the aura worker. The returned future should be run in a futures executor. 
-pub fn start_aura( +pub fn start_aura( StartAuraParams { slot_duration, client, @@ -197,11 +160,11 @@ pub fn start_aura( force_authoring, backoff_authoring_blocks, keystore, + can_author_with, block_proposal_slot_portion, max_block_proposal_slot_portion, telemetry, - compatibility_mode, - }: StartAuraParams>, + }: StartAuraParams, ) -> Result, sp_consensus::Error> where P: Pair + Send + Sync, @@ -219,6 +182,7 @@ where CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, + CAW: CanAuthorWith + Send, Error: std::error::Error + Send + From + 'static, { let worker = build_aura_worker::(BuildAuraWorkerParams { @@ -233,7 +197,6 @@ where telemetry, block_proposal_slot_portion, max_block_proposal_slot_portion, - compatibility_mode, }); Ok(sc_consensus_slots::start_slot_worker( @@ -242,11 +205,12 @@ where SimpleSlotWorkerToSlotWorker(worker), sync_oracle, create_inherent_data_providers, + can_author_with, )) } /// Parameters of [`build_aura_worker`]. -pub struct BuildAuraWorkerParams { +pub struct BuildAuraWorkerParams { /// The client to interact with the chain. pub client: Arc, /// The block import. @@ -274,10 +238,6 @@ pub struct BuildAuraWorkerParams { pub max_block_proposal_slot_portion: Option, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, - /// Compatibility mode that should be used. - /// - /// If in doubt, use `Default::default()`. - pub compatibility_mode: CompatibilityMode, } /// Build the aura worker. 
@@ -296,8 +256,7 @@ pub fn build_aura_worker( max_block_proposal_slot_portion, telemetry, force_authoring, - compatibility_mode, - }: BuildAuraWorkerParams>, + }: BuildAuraWorkerParams, ) -> impl sc_consensus_slots::SimpleSlotWorker< B, Proposer = PF::Proposer, @@ -305,7 +264,7 @@ pub fn build_aura_worker( SyncOracle = SO, JustificationSyncLink = L, Claim = P::Public, - AuxData = Vec>, + EpochData = Vec>, > where B: BlockT, @@ -334,12 +293,11 @@ where telemetry, block_proposal_slot_portion, max_block_proposal_slot_portion, - compatibility_mode, _key_type: PhantomData::

, } } -struct AuraWorker { +struct AuraWorker { client: Arc, block_import: I, env: E, @@ -351,13 +309,12 @@ struct AuraWorker { block_proposal_slot_portion: SlotProportion, max_block_proposal_slot_portion: Option, telemetry: Option, - compatibility_mode: CompatibilityMode, _key_type: PhantomData

, } #[async_trait::async_trait] impl sc_consensus_slots::SimpleSlotWorker - for AuraWorker> + for AuraWorker where B: BlockT, C: ProvideRuntimeApi + BlockOf + HeaderBackend + Sync, @@ -380,7 +337,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; - type AuxData = Vec>; + type EpochData = Vec>; fn logging_target(&self) -> &'static str { "aura" @@ -390,20 +347,15 @@ where &mut self.block_import } - fn aux_data( + fn epoch_data( &self, header: &B::Header, _slot: Slot, - ) -> Result { - authorities( - self.client.as_ref(), - header.hash(), - *header.number() + 1u32.into(), - &self.compatibility_mode, - ) + ) -> Result { + authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } - fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option { + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { Some(epoch_data.len()) } @@ -411,7 +363,7 @@ where &self, _header: &B::Header, slot: Slot, - epoch_data: &Self::AuxData, + epoch_data: &Self::EpochData, ) -> Option { let expected_author = slot_author::

(slot, epoch_data); expected_author.and_then(|p| { @@ -437,7 +389,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - _epoch: Self::AuxData, + _epoch: Self::EpochData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -590,42 +542,16 @@ pub fn find_pre_digest(header: &B::Header) -> Resul pre_digest.ok_or_else(|| aura_err(Error::NoDigestFound)) } -fn authorities( - client: &C, - parent_hash: B::Hash, - context_block_number: NumberFor, - compatibility_mode: &CompatibilityMode>, -) -> Result, ConsensusError> +fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> where A: Codec + Debug, B: BlockT, - C: ProvideRuntimeApi, + C: ProvideRuntimeApi + BlockOf, C::Api: AuraApi, { - let runtime_api = client.runtime_api(); - - match compatibility_mode { - CompatibilityMode::None => {}, - // Use `initialize_block` until we hit the block that should disable the mode. - CompatibilityMode::UseInitializeBlock { until } => - if *until > context_block_number { - runtime_api - .initialize_block( - &BlockId::Hash(parent_hash), - &B::Header::new( - context_block_number, - Default::default(), - Default::default(), - parent_hash, - Default::default(), - ), - ) - .map_err(|_| sp_consensus::Error::InvalidAuthoritiesSet)?; - }, - } - - runtime_api - .authorities(&BlockId::Hash(parent_hash)) + client + .runtime_api() + .authorities(at) .ok() .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } @@ -642,7 +568,9 @@ mod tests { use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::AURA; - use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; + use sp_consensus::{ + AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal, + }; use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; @@ -650,7 +578,7 @@ mod tests { traits::{Block as BlockT, Header 
as _}, Digest, }; - use sp_timestamp::Timestamp; + use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; use std::{ task::Poll, time::{Duration, Instant}, @@ -660,8 +588,6 @@ mod tests { TestClient, }; - const SLOT_DURATION_MS: u64 = 1000; - type Error = sp_blockchain::Error; struct DummyFactory(Arc); @@ -702,17 +628,19 @@ mod tests { } } + const SLOT_DURATION: u64 = 1000; + type AuraVerifier = import_queue::AuraVerifier< PeersFullClient, AuthorityPair, + AlwaysCanAuthor, Box< dyn CreateInherentDataProviders< TestBlock, (), - InherentDataProviders = (InherentDataProvider,), + InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), >, >, - u64, >; type AuraPeer = Peer<(), PeersClient>; @@ -730,19 +658,21 @@ mod tests { let client = client.as_client(); let slot_duration = slot_duration(&*client).expect("slot duration available"); - assert_eq!(slot_duration.as_millis() as u64, SLOT_DURATION_MS); + assert_eq!(slot_duration.as_millis() as u64, SLOT_DURATION); import_queue::AuraVerifier::new( client, Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + *timestamp, + SlotDuration::from_millis(6000), ); - Ok((slot,)) + + Ok((timestamp, slot)) }), + AlwaysCanAuthor, CheckForEquivocation::Yes, None, - CompatibilityMode::None, ) } @@ -808,7 +738,7 @@ mod tests { let slot_duration = slot_duration(&*client).expect("slot duration available"); aura_futures.push( - start_aura::(StartAuraParams { + start_aura::(StartAuraParams { slot_duration, block_import: client.clone(), select_chain, @@ -817,22 +747,23 @@ mod tests { sync_oracle: DummyOracle, justification_sync_link: (), create_inherent_data_providers: |_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - 
Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + *timestamp, + SlotDuration::from_millis(6000), ); - Ok((slot,)) + Ok((timestamp, slot)) }, force_authoring: false, backoff_authoring_blocks: Some( BackoffAuthoringOnFinalizedHeadLagging::default(), ), keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, telemetry: None, - compatibility_mode: CompatibilityMode::None, }) .expect("Starts aura"), ); @@ -853,8 +784,7 @@ mod tests { assert_eq!(client.chain_info().best_number, 0); assert_eq!( - authorities(&client, client.chain_info().best_hash, 1, &CompatibilityMode::None) - .unwrap(), + authorities(&client, &BlockId::Number(0)).unwrap(), vec![ Keyring::Alice.public().into(), Keyring::Bob.public().into(), @@ -891,7 +821,7 @@ mod tests { block_import: client, env: environ, keystore: keystore.into(), - sync_oracle: DummyOracle, + sync_oracle: DummyOracle.clone(), justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), @@ -899,7 +829,6 @@ mod tests { _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, - compatibility_mode: Default::default(), }; let head = Header::new( @@ -944,7 +873,7 @@ mod tests { block_import: client.clone(), env: environ, keystore: keystore.into(), - sync_oracle: DummyOracle, + sync_oracle: DummyOracle.clone(), justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Option::<()>::None, @@ -952,13 +881,13 @@ mod tests { _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, - compatibility_mode: Default::default(), }; let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); let res = executor::block_on(worker.on_slot(SlotInfo { slot: 0.into(), + timestamp: 0.into(), ends_at: Instant::now() + 
Duration::from_secs(100), inherent_data: InherentData::new(), duration: Duration::from_millis(1000), diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 6eefc60552388..59d7854619fc0 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -14,15 +14,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +async-trait = "0.1.50" +codec = { package = "parity-scale-codec", version = "3.0.0", features = [ + "derive", +] } futures = "0.3.21" log = "0.4.17" merlin = "2.0" -num-bigint = "0.4.3" -num-rational = "0.4.1" +num-bigint = "0.2.3" +num-rational = "0.2.2" num-traits = "0.2.8" -parking_lot = "0.12.1" +parking_lot = "0.12.0" +rand = "0.7.2" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" @@ -51,8 +54,8 @@ sp-version = { version = "5.0.0", path = "../../../primitives/version" } [dev-dependencies] rand_chacha = "0.2.2" +tempfile = "3.1.0" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } -sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 8433e3ac92e57..488036277d7b6 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -30,7 +30,7 @@ sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" tempfile = "3.1.0" tokio = "1.17.0" 
sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 288f852a5c989..b000d38a44f02 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -25,7 +25,7 @@ use jsonrpsee::{ types::{error::CallError, ErrorObject}, }; -use sc_consensus_babe::{authorship, Epoch}; +use sc_consensus_babe::{authorship, Config, Epoch}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; @@ -33,9 +33,7 @@ use sp_api::{BlockId, ProvideRuntimeApi}; use sp_application_crypto::AppKey; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::{Error as ConsensusError, SelectChain}; -use sp_consensus_babe::{ - digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi, BabeConfiguration, -}; +use sp_consensus_babe::{digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi}; use sp_core::crypto::ByteArray; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as _}; @@ -59,7 +57,7 @@ pub struct Babe { /// shared reference to the Keystore keystore: SyncCryptoStorePtr, /// config (actually holds the slot duration) - babe_config: BabeConfiguration, + babe_config: Config, /// The SelectChain strategy select_chain: SC, /// Whether to deny unsafe calls @@ -72,7 +70,7 @@ impl Babe { client: Arc, shared_epoch_changes: SharedEpochChanges, keystore: SyncCryptoStorePtr, - babe_config: BabeConfiguration, + babe_config: Config, select_chain: SC, deny_unsafe: DenyUnsafe, ) -> Self { @@ -187,7 +185,7 @@ impl From for JsonRpseeError { async fn epoch_data( epoch_changes: &SharedEpochChanges, client: &Arc, - babe_config: &BabeConfiguration, + babe_config: &Config, slot: u64, select_chain: &SC, ) -> Result @@ -204,7 +202,7 @@ where &parent.hash(), *parent.number(), slot.into(), 
- |slot| Epoch::genesis(babe_config, slot), + |slot| Epoch::genesis(babe_config.genesis_config(), slot), ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(e.to_string())))? .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) @@ -223,7 +221,7 @@ mod tests { TestClientBuilderExt, }; - use sc_consensus_babe::{block_import, AuthorityPair}; + use sc_consensus_babe::{block_import, AuthorityPair, Config}; use std::sync::Arc; /// creates keystore backed by a temp file @@ -245,7 +243,7 @@ mod tests { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let config = sc_consensus_babe::configuration(&*client).expect("config available"); + let config = Config::get(&*client).expect("config available"); let (_, link) = block_import(config.clone(), client.clone(), client.clone()) .expect("can initialize block-import"); diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index b39153faa6d1a..43df26a9a29ae 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -85,7 +85,7 @@ pub(super) fn calculate_primary_threshold( qed.", ); - ((BigUint::one() << 128usize) * numer / denom).to_u128().expect( + ((BigUint::one() << 128) * numer / denom).to_u128().expect( "returns None if the underlying value cannot be represented with 128 bits; \ we start with 2^128 which is one more than can be represented with 128 bits; \ we multiple by p which is defined in [0, 1); \ @@ -310,7 +310,7 @@ mod tests { assert!(claim_slot(10.into(), &epoch, &keystore).is_none()); - epoch.authorities.push((valid_public_key.into(), 10)); + epoch.authorities.push((valid_public_key.clone().into(), 10)); assert_eq!(claim_slot(10.into(), &epoch, &keystore).unwrap().1, valid_public_key.into()); } } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index fef84bda86974..2ab84b9b132cc 
100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -28,7 +28,7 @@ use sc_consensus_epochs::{ EpochChangesFor, SharedEpochChanges, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_consensus_babe::{BabeBlockWeight, BabeConfiguration}; +use sp_consensus_babe::{BabeBlockWeight, BabeGenesisConfiguration}; use sp_runtime::traits::Block as BlockT; const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; @@ -57,7 +57,7 @@ where /// Load or initialize persistent epoch change data from backend. pub fn load_epoch_changes( backend: &B, - config: &BabeConfiguration, + config: &BabeGenesisConfiguration, ) -> ClientResult> { let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; @@ -143,7 +143,7 @@ mod test { use sc_consensus_epochs::{EpochHeader, PersistedEpoch, PersistedEpochHeader}; use sc_network_test::Block as TestBlock; use sp_consensus::Error as ConsensusError; - use sp_consensus_babe::AllowedSlots; + use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; use sp_core::H256; use sp_runtime::traits::NumberFor; use substrate_test_runtime_client; @@ -182,11 +182,11 @@ mod test { let epoch_changes = load_epoch_changes::( &client, - &BabeConfiguration { + &BabeGenesisConfiguration { slot_duration: 10, epoch_length: 4, c: (3, 10), - authorities: Vec::new(), + genesis_authorities: Vec::new(), randomness: Default::default(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 109e5aade02a7..f61ba23d920f3 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -67,6 +67,7 @@ #![warn(missing_docs)] use std::{ + borrow::Cow, collections::{HashMap, HashSet}, future::Future, pin::Pin, @@ -114,10 +115,11 @@ use sp_blockchain::{ Backend as _, Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult, }; use 
sp_consensus::{ - BlockOrigin, CacheKeyId, Environment, Error as ConsensusError, Proposer, SelectChain, + BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, + SelectChain, }; use sp_consensus_babe::inherents::BabeInherentData; -use sp_consensus_slots::Slot; +use sp_consensus_slots::{Slot, SlotDuration}; use sp_core::{crypto::ByteArray, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; @@ -135,7 +137,8 @@ pub use sp_consensus_babe::{ PrimaryPreDigest, SecondaryPlainPreDigest, }, AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight, BabeBlockWeight, - BabeConfiguration, BabeEpochConfiguration, ConsensusLog, BABE_ENGINE_ID, VRF_OUTPUT_LENGTH, + BabeEpochConfiguration, BabeGenesisConfiguration, ConsensusLog, BABE_ENGINE_ID, + VRF_OUTPUT_LENGTH, }; pub use aux_schema::load_block_weight as block_weight; @@ -208,12 +211,12 @@ impl From for Epoch { impl Epoch { /// Create the genesis epoch (epoch #0). This is defined to start at the slot of /// the first block, so that has to be provided. - pub fn genesis(genesis_config: &BabeConfiguration, slot: Slot) -> Epoch { + pub fn genesis(genesis_config: &BabeGenesisConfiguration, slot: Slot) -> Epoch { Epoch { epoch_index: 0, start_slot: slot, duration: genesis_config.epoch_length, - authorities: genesis_config.authorities.clone(), + authorities: genesis_config.genesis_authorities.clone(), randomness: genesis_config.randomness, config: BabeEpochConfiguration { c: genesis_config.c, @@ -335,40 +338,60 @@ pub struct BabeIntermediate { /// Intermediate key for Babe engine. pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; -/// Read configuration from the runtime state at current best block. 
-pub fn configuration(client: &C) -> ClientResult -where - C: AuxStore + ProvideRuntimeApi + UsageProvider, - C::Api: BabeApi, -{ - let block_id = if client.usage_info().chain.finalized_state.is_some() { - BlockId::Hash(client.usage_info().chain.best_hash) - } else { - debug!(target: "babe", "No finalized state is available. Reading config from genesis"); - BlockId::Hash(client.usage_info().chain.genesis_hash) - }; +/// Configuration for BABE used for defining block verification parameters as +/// well as authoring (e.g. the slot duration). +#[derive(Clone)] +pub struct Config { + genesis_config: BabeGenesisConfiguration, +} + +impl Config { + /// Create a new config by reading the genesis configuration from the runtime. + pub fn get(client: &C) -> ClientResult + where + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, + { + trace!(target: "babe", "Getting slot duration"); + + let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); + if client.usage_info().chain.finalized_state.is_none() { + debug!(target: "babe", "No finalized state is available. Reading config from genesis"); + best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash); + } + let runtime_api = client.runtime_api(); - let runtime_api = client.runtime_api(); - let version = runtime_api.api_version::>(&block_id)?; + let version = runtime_api.api_version::>(&best_block_id)?; - let config = match version { - Some(1) => { + let genesis_config = if version == Some(1) { #[allow(deprecated)] { - runtime_api.configuration_before_version_2(&block_id)?.into() + runtime_api.configuration_before_version_2(&best_block_id)?.into() } - }, - Some(2) => runtime_api.configuration(&block_id)?, - _ => + } else if version == Some(2) { + runtime_api.configuration(&best_block_id)? 
+ } else { return Err(sp_blockchain::Error::VersionInvalid( "Unsupported or invalid BabeApi version".to_string(), - )), - }; - Ok(config) + )) + }; + + Ok(Config { genesis_config }) + } + + /// Get the genesis configuration. + pub fn genesis_config(&self) -> &BabeGenesisConfiguration { + &self.genesis_config + } + + /// Get the slot duration defined in the genesis configuration. + pub fn slot_duration(&self) -> SlotDuration { + SlotDuration::from_millis(self.genesis_config.slot_duration) + } } /// Parameters for BABE. -pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -404,6 +427,9 @@ pub struct BabeParams { /// The source of timestamps for relative slots pub babe_link: BabeLink, + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, + /// The proportion of the slot dedicated to proposing. /// /// The block proposing will be limited to this proportion of the slot from the starting of the @@ -420,7 +446,7 @@ pub struct BabeParams { } /// Start the babe worker. 
-pub fn start_babe( +pub fn start_babe( BabeParams { keystore, client, @@ -433,10 +459,11 @@ pub fn start_babe( force_authoring, backoff_authoring_blocks, babe_link, + can_author_with, block_proposal_slot_portion, max_block_proposal_slot_portion, telemetry, - }: BabeParams, + }: BabeParams, ) -> Result, sp_consensus::Error> where B: BlockT, @@ -462,6 +489,7 @@ where CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, Error: std::error::Error + Send + From + From + 'static, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -493,6 +521,7 @@ where sc_consensus_slots::SimpleSlotWorkerToSlotWorker(worker), sync_oracle, create_inherent_data_providers, + can_author_with, ); let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); @@ -582,7 +611,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B async fn answer_requests( mut request_rx: Receiver>, - config: BabeConfiguration, + config: Config, client: Arc, epoch_changes: SharedEpochChanges, ) where @@ -611,7 +640,9 @@ async fn answer_requests( .ok_or(Error::::FetchEpoch(parent_hash))?; let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&config, slot)) + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&config.genesis_config, slot) + }) .ok_or(Error::::FetchEpoch(parent_hash))?; Ok(sp_consensus_babe::Epoch { @@ -708,7 +739,7 @@ struct BabeSlotWorker { keystore: SyncCryptoStorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, - config: BabeConfiguration, + config: Config, block_proposal_slot_portion: SlotProportion, max_block_proposal_slot_portion: Option, telemetry: Option, @@ -729,6 +760,7 @@ where BS: BackoffAuthoringBlocksStrategy> + Sync, Error: std::error::Error + Send + From + From + 'static, { + type EpochData = ViableEpochDescriptor, Epoch>; type Claim = 
(PreDigest, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; @@ -736,7 +768,6 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; - type AuxData = ViableEpochDescriptor, Epoch>; fn logging_target(&self) -> &'static str { "babe" @@ -746,7 +777,11 @@ where &mut self.block_import } - fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { + fn epoch_data( + &self, + parent: &B::Header, + slot: Slot, + ) -> Result { self.epoch_changes .shared_data() .epoch_descriptor_for_child_of( @@ -759,10 +794,12 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { self.epoch_changes .shared_data() - .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .viable_epoch(epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + }) .map(|epoch| epoch.as_ref().authorities.len()) } @@ -777,7 +814,9 @@ where slot, self.epoch_changes .shared_data() - .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))? + .viable_epoch(epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + })? 
.as_ref(), &self.keystore, ); @@ -819,7 +858,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, (_, public): Self::Claim, - epoch_descriptor: Self::AuxData, + epoch_descriptor: Self::EpochData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -852,8 +891,10 @@ where import_block.body = Some(body); import_block.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - import_block - .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); Ok(import_block) } @@ -979,7 +1020,7 @@ fn find_next_config_digest( #[derive(Clone)] pub struct BabeLink { epoch_changes: SharedEpochChanges, - config: BabeConfiguration, + config: Config, } impl BabeLink { @@ -989,27 +1030,29 @@ impl BabeLink { } /// Get the config of this link. - pub fn config(&self) -> &BabeConfiguration { + pub fn config(&self) -> &Config { &self.config } } /// A verifier for Babe blocks. 
-pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, select_chain: SelectChain, create_inherent_data_providers: CIDP, - config: BabeConfiguration, + config: Config, epoch_changes: SharedEpochChanges, + can_author_with: CAW, telemetry: Option, } -impl BabeVerifier +impl BabeVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { async fn check_inherents( @@ -1020,6 +1063,16 @@ where create_inherent_data_providers: CIDP::InherentDataProviders, execution_context: ExecutionContext, ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "babe", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self .client .runtime_api() @@ -1124,8 +1177,8 @@ type BlockVerificationResult = Result<(BlockImportParams, Option)>>), String>; #[async_trait::async_trait] -impl Verifier - for BabeVerifier +impl Verifier + for BabeVerifier where Block: BlockT, Client: HeaderMetadata @@ -1136,6 +1189,7 @@ where + AuxStore, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, CIDP: CreateInherentDataProviders + Send + Sync, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -1191,7 +1245,9 @@ where .map_err(|e| Error::::ForkTree(Box::new(e)))? .ok_or(Error::::FetchEpoch(parent_hash))?; let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + }) .ok_or(Error::::FetchEpoch(parent_hash))?; // We add one to the current slot to allow for some small drift. 
@@ -1230,26 +1286,24 @@ where warn!(target: "babe", "Error checking/reporting BABE equivocation: {}", err); } + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. if let Some(inner_body) = block.body { + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.babe_replace_inherent_data(slot); let new_block = Block::new(pre_header.clone(), inner_body); - if !block.state_action.skip_execution_checks() { - // if the body is passed through and the block was executed, - // we need to use the runtime to check that the internally-set - // timestamp in the inherents actually matches the slot set in the seal. - let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .map_err(Error::::CreateInherents)?; - inherent_data.babe_replace_inherent_data(slot); - - self.check_inherents( - new_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - create_inherent_data_providers, - block.origin.into(), - ) - .await?; - } + + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; let (_, inner_body) = new_block.deconstruct(); block.body = Some(inner_body); @@ -1265,9 +1319,9 @@ where block.header = pre_header; block.post_digests.push(verified_info.seal); - block.insert_intermediate( - INTERMEDIATE_KEY, - BabeIntermediate:: { epoch_descriptor }, + block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); block.post_hash = Some(hash); @@ -1299,7 +1353,7 @@ pub struct BabeBlockImport { inner: I, client: Arc, epoch_changes: SharedEpochChanges, - config: BabeConfiguration, + config: Config, } impl Clone for BabeBlockImport { @@ -1318,7 +1372,7 @@ impl BabeBlockImport { client: Arc, 
epoch_changes: SharedEpochChanges, block_import: I, - config: BabeConfiguration, + config: Config, ) -> Self { BabeBlockImport { client, inner: block_import, epoch_changes, config } } @@ -1419,7 +1473,7 @@ where match self.client.status(BlockId::Hash(hash)) { Ok(sp_blockchain::BlockStatus::InChain) => { // When re-importing existing block strip away intermediates. - let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); + let _ = block.take_intermediate::>(INTERMEDIATE_KEY); block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); return self.inner.import_block(block, new_cache).await.map_err(Into::into) }, @@ -1488,7 +1542,7 @@ where }; let intermediate = - block.remove_intermediate::>(INTERMEDIATE_KEY)?; + block.take_intermediate::>(INTERMEDIATE_KEY)?; let epoch_descriptor = intermediate.epoch_descriptor; let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); @@ -1526,7 +1580,9 @@ where old_epoch_changes = Some((*epoch_changes).clone()); let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + }) .ok_or_else(|| { ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) })?; @@ -1705,7 +1761,7 @@ where /// Also returns a link object used to correctly instantiate the import queue /// and background worker. 
pub fn block_import( - config: BabeConfiguration, + config: Config, wrapped_block_import: I, client: Arc, ) -> ClientResult<(BabeBlockImport, BabeLink)> @@ -1716,7 +1772,8 @@ where + PreCommitActions + 'static, { - let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; + let epoch_changes = + aux_schema::load_epoch_changes::(&*client, &config.genesis_config)?; let link = BabeLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; // NOTE: this isn't entirely necessary, but since we didn't use to prune the @@ -1748,7 +1805,7 @@ where /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. -pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, @@ -1757,6 +1814,7 @@ pub fn import_queue( create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, + can_author_with: CAW, telemetry: Option, ) -> ClientResult> where @@ -1776,6 +1834,7 @@ where + 'static, Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -1784,6 +1843,7 @@ where create_inherent_data_providers, config: babe_link.config, epoch_changes: babe_link.epoch_changes, + can_author_with, telemetry, client, }; @@ -1824,9 +1884,9 @@ where // Revert epoch changes tree. - // This config is only used on-genesis. 
- let config = configuration(&*client)?; - let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; + let config = Config::get(&*client)?; + let epoch_changes = + aux_schema::load_epoch_changes::(&*client, config.genesis_config())?; let mut epoch_changes = epoch_changes.shared_data(); if revert_up_to_number == Zero::zero() { diff --git a/client/consensus/babe/src/migration.rs b/client/consensus/babe/src/migration.rs index 23413aa6a7b1b..a8c3772bbefb8 100644 --- a/client/consensus/babe/src/migration.rs +++ b/client/consensus/babe/src/migration.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::{ - AuthorityId, BabeAuthorityWeight, BabeConfiguration, BabeEpochConfiguration, Epoch, + AuthorityId, BabeAuthorityWeight, BabeEpochConfiguration, BabeGenesisConfiguration, Epoch, NextEpochDescriptor, VRF_OUTPUT_LENGTH, }; use codec::{Decode, Encode}; @@ -64,7 +64,7 @@ impl EpochT for EpochV0 { impl EpochV0 { /// Migrate the sturct to current epoch version. - pub fn migrate(self, config: &BabeConfiguration) -> Epoch { + pub fn migrate(self, config: &BabeGenesisConfiguration) -> Epoch { Epoch { epoch_index: self.epoch_index, start_slot: self.start_slot, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 8bef1b38b929d..c0a7a8c6c013a 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -22,33 +22,27 @@ use super::*; use authorship::claim_slot; use futures::executor::block_on; use log::debug; -use rand_chacha::{ - rand_core::{RngCore, SeedableRng}, - ChaChaRng, -}; +use rand::RngCore; +use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; +use sc_keystore::LocalKeystore; use sc_network_test::{Block as 
TestBlock, *}; use sp_application_crypto::key_types::BABE; -use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus::{AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_babe::{ inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, AuthorityPair, Slot, }; -use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; -use sp_keyring::Sr25519Keyring; -use sp_keystore::{ - testing::KeyStore as TestKeyStore, vrf::make_transcript as transcript_from_data, - SyncCryptoStore, -}; +use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; -use sp_timestamp::Timestamp; +use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; use std::{cell::RefCell, task::Poll, time::Duration}; type Item = DigestItem; @@ -73,13 +67,11 @@ type Mutator = Arc; type BabeBlockImport = PanickingBlockImport>>; -const SLOT_DURATION_MS: u64 = 1000; - #[derive(Clone)] struct DummyFactory { client: Arc, epoch_changes: SharedEpochChanges, - config: BabeConfiguration, + config: Config, mutator: Mutator, } @@ -147,7 +139,7 @@ impl DummyProposer { &self.parent_hash, self.parent_number, this_slot, - |slot| Epoch::genesis(&self.factory.config, slot), + |slot| Epoch::genesis(self.factory.config.genesis_config(), slot), ) .expect("client has data to find epoch") .expect("can compute epoch for baked block"); @@ -160,7 +152,7 @@ impl DummyProposer { // that will re-check the randomness logic off-chain. 
let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { authorities: epoch.authorities.clone(), - randomness: epoch.randomness, + randomness: epoch.randomness.clone(), }) .encode(); let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); @@ -242,11 +234,12 @@ pub struct TestVerifier { TestBlock, PeersFullClient, TestSelectChain, + AlwaysCanAuthor, Box< dyn CreateInherentDataProviders< TestBlock, (), - InherentDataProviders = (InherentDataProvider,), + InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), >, >, >, @@ -295,7 +288,7 @@ impl TestNetFactory for BabeTestNet { ) { let client = client.as_client(); - let config = crate::configuration(&*client).expect("config available"); + let config = Config::get(&*client).expect("config available"); let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) .expect("can initialize block-import"); @@ -328,14 +321,17 @@ impl TestNetFactory for BabeTestNet { client: client.clone(), select_chain: longest_chain, create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + *timestamp, + SlotDuration::from_millis(6000), ); - Ok((slot,)) + + Ok((timestamp, slot)) }), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), + can_author_with: AlwaysCanAuthor, telemetry: None, }, mutator: MUTATOR.with(|m| m.borrow().clone()), @@ -368,13 +364,6 @@ fn rejects_empty_block() { }) } -fn create_keystore(authority: Sr25519Keyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeyStore::new()); - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(&authority.to_seed())) - .expect("Generates authority key"); - keystore -} - fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) { 
sp_tracing::try_init_simple(); let mutator = Arc::new(mutator) as Mutator; @@ -382,19 +371,25 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); let net = BabeTestNet::new(3); - let peers = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie]; + let peers = &[(0, "//Alice"), (1, "//Bob"), (2, "//Charlie")]; let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); let mut babe_futures = Vec::new(); + let mut keystore_paths = Vec::new(); - for (peer_id, auth_id) in peers.iter().enumerate() { + for (peer_id, seed) in peers { let mut net = net.lock(); - let peer = net.peer(peer_id); + let peer = net.peer(*peer_id); let client = peer.client().as_client(); let select_chain = peer.select_chain().expect("Full client has select_chain"); - let keystore = create_keystore(*auth_id); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); + SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(seed)) + .expect("Generates authority key"); + keystore_paths.push(keystore_path); let mut got_own = false; let mut got_other = false; @@ -439,16 +434,19 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static env: environ, sync_oracle: DummyOracle, create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + *timestamp, + SlotDuration::from_millis(6000), ); - Ok((slot,)) + + Ok((timestamp, slot)) }), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), babe_link: data.link.clone(), keystore, + can_author_with: 
sp_consensus::AlwaysCanAuthor, justification_sync_link: (), block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, @@ -542,14 +540,16 @@ fn sig_is_not_pre_digest() { #[test] fn can_author_block() { sp_tracing::try_init_simple(); - - let authority = Sr25519Keyring::Alice; - let keystore = create_keystore(authority); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); + let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) + .expect("Generates authority pair"); let mut i = 0; let epoch = Epoch { start_slot: 0.into(), - authorities: vec![(authority.public().into(), 1)], + authorities: vec![(public.into(), 1)], randomness: [0; 32], epoch_index: 1, duration: 100, @@ -559,11 +559,11 @@ fn can_author_block() { }, }; - let mut config = crate::BabeConfiguration { + let mut config = crate::BabeGenesisConfiguration { slot_duration: 1000, epoch_length: 100, c: (3, 10), - authorities: Vec::new(), + genesis_authorities: Vec::new(), randomness: [0; 32], allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }; @@ -644,8 +644,10 @@ fn propose_and_import_block( let mut import = BlockImportParams::new(BlockOrigin::Own, block.header); import.post_digests.push(seal); import.body = Some(block.extrinsics); - import - .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); + import.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); let import_result = block_on(block_import.import_block(import, Default::default())).unwrap(); @@ -706,12 +708,12 @@ fn importing_block_one_sets_genesis_epoch() { &mut block_import, ); - let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); + let genesis_epoch = 
Epoch::genesis(data.link.config.genesis_config(), 999.into()); let epoch_changes = data.link.epoch_changes.shared_data(); let epoch_for_second_block = epoch_changes .epoch_data_for_child_of(descendent_query(&*client), &block_hash, 1, 1000.into(), |slot| { - Epoch::genesis(&data.link.config, slot) + Epoch::genesis(data.link.config.genesis_config(), slot) }) .unwrap() .unwrap(); @@ -769,14 +771,16 @@ fn revert_prunes_epoch_changes_and_removes_weights() { // Load and check epoch changes. - let actual_nodes = - aux_schema::load_epoch_changes::(&*client, &data.link.config) - .expect("load epoch changes") - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| *h) - .collect::>(); + let actual_nodes = aux_schema::load_epoch_changes::( + &*client, + data.link.config.genesis_config(), + ) + .expect("load epoch changes") + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| *h) + .collect::>(); let expected_nodes = vec![ canon[0], // A @@ -824,7 +828,7 @@ fn revert_not_allowed_for_finalized() { let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 3); // Finalize best block - client.finalize_block(canon[2], None, false).unwrap(); + client.finalize_block(BlockId::Hash(canon[2]), None, false).unwrap(); // Revert canon chain to last finalized block revert(client.clone(), backend, 100).expect("revert should work for baked test scenario"); @@ -871,12 +875,12 @@ fn importing_epoch_change_block_prunes_tree() { // Create and import the canon chain and keep track of fork blocks (A, C, D) // from the diagram above. 
- let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 30); + let canon_hashes = propose_and_import_blocks_wrap(BlockId::Number(0), 30); // Create the forks - let fork_1 = propose_and_import_blocks_wrap(BlockId::Hash(canon[0]), 10); - let fork_2 = propose_and_import_blocks_wrap(BlockId::Hash(canon[12]), 15); - let fork_3 = propose_and_import_blocks_wrap(BlockId::Hash(canon[18]), 10); + let fork_1 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[0]), 10); + let fork_2 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[12]), 15); + let fork_3 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[18]), 10); // We should be tracking a total of 9 epochs in the fork tree assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9); @@ -886,31 +890,51 @@ fn importing_epoch_change_block_prunes_tree() { // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). - client.finalize_block(canon[12], None, false).unwrap(); + client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7); - let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect(); - - // no hashes from the first fork must exist on the tree - assert!(!nodes.iter().any(|h| fork_1.contains(h))); + // at this point no hashes from the first fork must exist on the tree + assert!(!epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_1.contains(h)),); // but the epoch changes from the other forks must still exist - assert!(nodes.iter().any(|h| fork_2.contains(h))); - assert!(nodes.iter().any(|h| fork_3.contains(h))); + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h))); + + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); // 
finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(canon[24], None, false).unwrap(); + client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8); - let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect(); - - // no hashes from the other forks must exist on the tree - assert!(!nodes.iter().any(|h| fork_2.contains(h))); - assert!(!nodes.iter().any(|h| fork_3.contains(h))); + // at this point no hashes from the second fork must exist on the tree + assert!(!epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h)),); - // Check that we contain the nodes that we care about - assert!(nodes.iter().any(|h| *h == canon[18])); - assert!(nodes.iter().any(|h| *h == canon[24])); + // while epoch changes from the last fork should still exist + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); } #[test] @@ -951,13 +975,15 @@ fn verify_slots_are_strictly_increasing() { #[test] fn babe_transcript_generation_match() { sp_tracing::try_init_simple(); - - let authority = Sr25519Keyring::Alice; - let _keystore = create_keystore(authority); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); + let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) + .expect("Generates authority pair"); let epoch = Epoch { start_slot: 0.into(), - authorities: vec![(authority.public().into(), 1)], + authorities: vec![(public.into(), 1)], randomness: [0; 32], epoch_index: 1, duration: 100, @@ -1031,7 +1057,7 @@ fn obsolete_blocks_aux_data_cleanup() { assert!(aux_data_check(&fork3_hashes, true)); // Finalize A3 - 
client.finalize_block(fork1_hashes[2], None, true).unwrap(); + client.finalize_block(BlockId::Number(3), None, true).unwrap(); // Wiped: A1, A2 assert!(aux_data_check(&fork1_hashes[..2], false)); @@ -1042,7 +1068,7 @@ fn obsolete_blocks_aux_data_cleanup() { // Present C4, C5 assert!(aux_data_check(&fork3_hashes, true)); - client.finalize_block(fork1_hashes[3], None, true).unwrap(); + client.finalize_block(BlockId::Number(4), None, true).unwrap(); // Wiped: A3 assert!(aux_data_check(&fork1_hashes[2..3], false)); diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index d5745665a79fd..12b630f36b89b 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.42" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" -libp2p = { version = "0.49.0", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -parking_lot = "0.12.1" +parking_lot = "0.12.0" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index f888176addd2d..10739f63ef779 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -153,18 +153,6 @@ pub enum StateAction { Skip, } -impl StateAction { - /// Check if execution checks that require runtime calls should be skipped. - pub fn skip_execution_checks(&self) -> bool { - match self { - StateAction::ApplyChanges(_) | - StateAction::Execute | - StateAction::ExecuteIfPossible => false, - StateAction::Skip => true, - } - } -} - /// Data required to import a Block. 
#[non_exhaustive] pub struct BlockImportParams { @@ -294,23 +282,18 @@ impl BlockImportParams { } } - /// Insert intermediate by given key. - pub fn insert_intermediate(&mut self, key: &'static [u8], value: T) { - self.intermediates.insert(Cow::from(key), Box::new(value)); - } - - /// Remove and return intermediate by given key. - pub fn remove_intermediate(&mut self, key: &[u8]) -> Result { + /// Take intermediate by given key, and remove it from the processing list. + pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; - v.downcast::().map(|v| *v).map_err(|v| { + v.downcast::().map_err(|v| { self.intermediates.insert(k, v); Error::InvalidIntermediate }) } /// Get a reference to a given intermediate. - pub fn get_intermediate(&self, key: &[u8]) -> Result<&T, Error> { + pub fn intermediate(&self, key: &[u8]) -> Result<&T, Error> { self.intermediates .get(key) .ok_or(Error::NoIntermediate)? @@ -319,7 +302,7 @@ impl BlockImportParams { } /// Get a mutable reference to a given intermediate. - pub fn get_intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { + pub fn intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { self.intermediates .get_mut(key) .ok_or(Error::NoIntermediate)? diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index 3741fa99663cd..c71e21ccd4b00 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -62,8 +62,8 @@ pub type BoxBlockImport = pub type BoxJustificationImport = Box + Send + Sync>; -/// Maps to the RuntimeOrigin used by the network. -pub type RuntimeOrigin = libp2p::PeerId; +/// Maps to the Origin used by the network. +pub type Origin = libp2p::PeerId; /// Block data used by the queue. #[derive(Debug, PartialEq, Eq, Clone)] @@ -79,7 +79,7 @@ pub struct IncomingBlock { /// Justification(s) if requested. 
pub justifications: Option, /// The peer, we received this from - pub origin: Option, + pub origin: Option, /// Allow importing the block skipping state verification if parent state is missing. pub allow_missing_state: bool, /// Skip block execution and state verification. @@ -112,7 +112,7 @@ pub trait ImportQueue: Send { /// Import block justifications. fn import_justifications( &mut self, - who: RuntimeOrigin, + who: Origin, hash: B::Hash, number: NumberFor, justifications: Justifications, @@ -140,7 +140,7 @@ pub trait Link: Send { /// Justification import result. fn justification_imported( &mut self, - _who: RuntimeOrigin, + _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool, @@ -155,9 +155,9 @@ pub trait Link: Send { #[derive(Debug, PartialEq)] pub enum BlockImportStatus { /// Imported known block. - ImportedKnown(N, Option), + ImportedKnown(N, Option), /// Imported unknown block. - ImportedUnknown(N, ImportedAux, Option), + ImportedUnknown(N, ImportedAux, Option), } impl BlockImportStatus { @@ -175,15 +175,15 @@ impl BlockImportStatus { pub enum BlockImportError { /// Block missed header, can't be imported #[error("block is missing a header (origin = {0:?})")] - IncompleteHeader(Option), + IncompleteHeader(Option), /// Block verification failed, can't be imported #[error("block verification failed (origin = {0:?}): {1}")] - VerificationFailed(Option, String), + VerificationFailed(Option, String), /// Block is known to be Bad #[error("bad block (origin = {0:?})")] - BadBlock(Option), + BadBlock(Option), /// Parent state is missing. 
#[error("block is missing parent state")] diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 0e607159b75c3..9fe293142050b 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -34,7 +34,7 @@ use crate::{ import_queue::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, - BoxJustificationImport, ImportQueue, IncomingBlock, Link, RuntimeOrigin, Verifier, + BoxJustificationImport, ImportQueue, IncomingBlock, Link, Origin, Verifier, }, metrics::Metrics, }; @@ -120,7 +120,7 @@ impl ImportQueue for BasicQueue fn import_justifications( &mut self, - who: RuntimeOrigin, + who: Origin, hash: B::Hash, number: NumberFor, justifications: Justifications, @@ -152,7 +152,7 @@ mod worker_messages { pub struct ImportBlocks(pub BlockOrigin, pub Vec>); pub struct ImportJustification( - pub RuntimeOrigin, + pub Origin, pub B::Hash, pub NumberFor, pub Justification, @@ -289,7 +289,7 @@ impl BlockImportWorker { async fn import_justification( &mut self, - who: RuntimeOrigin, + who: Origin, hash: B::Hash, number: NumberFor, justification: Justification, @@ -530,12 +530,12 @@ mod tests { fn justification_imported( &mut self, - _who: RuntimeOrigin, + _who: Origin, hash: &Hash, _number: BlockNumber, _success: bool, ) { - self.events.push(Event::JustificationImported(*hash)) + self.events.push(Event::JustificationImported(hash.clone())) } } diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs index 5d418dddf0853..d3d91f5bd31c5 100644 --- a/client/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -38,7 +38,7 @@ //! }); //! 
``` -use crate::import_queue::{Link, RuntimeOrigin}; +use crate::import_queue::{Link, Origin}; use futures::prelude::*; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -82,7 +82,7 @@ impl Clone for BufferedLinkSender { /// Internal buffered message. enum BlockImportWorkerMsg { BlocksProcessed(usize, usize, Vec<(BlockImportResult, B::Hash)>), - JustificationImported(RuntimeOrigin, B::Hash, NumberFor, bool), + JustificationImported(Origin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), } @@ -100,7 +100,7 @@ impl Link for BufferedLinkSender { fn justification_imported( &mut self, - who: RuntimeOrigin, + who: Origin, hash: &B::Hash, number: NumberFor, success: bool, diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index f8b6253ef2353..fee69613debf0 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -518,8 +518,8 @@ where let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(ref epoch_0, _) => epoch_0.start_slot <= slot, - PersistedEpochHeader::Regular(ref epoch_n) => epoch_n.start_slot <= slot, + PersistedEpochHeader::Genesis(_, ref epoch_1) => slot >= epoch_1.end_slot, + PersistedEpochHeader::Regular(ref epoch_n) => slot >= epoch_n.end_slot, }; // prune any epochs which could not be _live_ as of the children of the @@ -777,6 +777,11 @@ where } } + /// Return the inner fork tree. + pub fn tree(&self) -> &ForkTree> { + &self.inner + } + /// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and /// `hash`. 
pub fn reset(&mut self, parent_hash: Hash, hash: Hash, number: Number, current: E, next: E) { @@ -827,11 +832,6 @@ where self.epochs.remove(&(h, n)); }); } - - /// Return the inner fork tree (mostly useful for testing) - pub fn tree(&self) -> &ForkTree> { - &self.inner - } } /// Type alias to produce the epoch-changes tree from a block type. @@ -1063,7 +1063,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1080,7 +1080,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) @@ -1114,89 +1114,6 @@ mod tests { } } - #[test] - fn prune_removes_stale_nodes() { - // +---D +-------F - // | | - // 0---A---B--(x)--C--(y)--G - // | | - // +---H +-------E - // - // Test parameters: - // - epoch duration: 100 - // - // We are going to prune the tree at: - // - 'x', a node between B and C - // - 'y', a node between C and G - - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, block) { - (b"0", _) => Ok(true), - (b"A", b) => Ok(b != b"0"), - (b"B", b) => Ok(b != b"0" && b != b"A" && b != b"D"), - (b"C", b) => Ok(b == b"F" || b == b"G" || b == b"y"), - (b"x", b) => Ok(b == b"C" || b == b"F" || b == b"G" || b == b"y"), - (b"y", b) => Ok(b == b"G"), - _ => Ok(false), - } - }; - - let mut epoch_changes = EpochChanges::new(); - - let mut import_at = |slot, hash: &Hash, number, parent_hash, parent_number| { - let make_genesis = |slot| Epoch { start_slot: slot, duration: 100 }; - // Get epoch descriptor valid for 'slot' - let epoch_descriptor = epoch_changes - .epoch_descriptor_for_child_of(&is_descendent_of, parent_hash, 
parent_number, slot) - .unwrap() - .unwrap(); - // Increment it - let next_epoch_desc = epoch_changes - .viable_epoch(&epoch_descriptor, &make_genesis) - .unwrap() - .increment(()); - // Assign it to hash/number - epoch_changes - .import(&is_descendent_of, *hash, number, *parent_hash, next_epoch_desc) - .unwrap(); - }; - - import_at(100, b"A", 10, b"0", 0); - import_at(200, b"B", 20, b"A", 10); - import_at(300, b"C", 30, b"B", 20); - import_at(200, b"D", 20, b"A", 10); - import_at(300, b"E", 30, b"B", 20); - import_at(400, b"F", 40, b"C", 30); - import_at(400, b"G", 40, b"C", 30); - import_at(100, b"H", 10, b"0", 0); - - let mut nodes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| h).collect(); - nodes.sort(); - assert_eq!(nodes, vec![b"A", b"B", b"C", b"D", b"E", b"F", b"G", b"H"]); - - // Finalize block 'x' @ number 25, slot 230 - // This should prune all nodes imported by blocks with a number < 25 that are not - // ancestors of 'x' and all nodes before the one holding the epoch information - // to which 'x' belongs to (i.e. before A). - - epoch_changes.prune_finalized(&is_descendent_of, b"x", 25, 230).unwrap(); - - let mut nodes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| h).collect(); - nodes.sort(); - assert_eq!(nodes, vec![b"A", b"B", b"C", b"E", b"F", b"G"]); - - // Finalize block y @ number 35, slot 330 - // This should prune all nodes imported by blocks with a number < 35 that are not - // ancestors of 'y' and all nodes before the one holding the epoch information - // to which 'y' belongs to (i.e. before B). - - epoch_changes.prune_finalized(&is_descendent_of, b"y", 35, 330).unwrap(); - - let mut nodes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| h).collect(); - nodes.sort(); - assert_eq!(nodes, vec![b"B", b"C", b"F", b"G"]); - } - /// Test that ensures that the gap is not enabled when we import multiple genesis blocks. 
#[test] fn gap_is_not_enabled_when_multiple_genesis_epochs_are_imported() { @@ -1228,7 +1145,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1245,7 +1162,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) @@ -1303,7 +1220,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"1", 1, *b"0", incremented_epoch) @@ -1413,7 +1330,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1430,7 +1347,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&epoch_b_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"B", 201, *b"A", incremented_epoch) @@ -1447,7 +1364,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor); + .increment(next_descriptor.clone()); epoch_changes .import(&is_descendent_of, *b"C", 1, *b"0", incremented_epoch) diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 9c3bc5413317d..83c156ef5667f 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ 
b/client/consensus/manual-seal/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } assert_matches = "1.3.0" -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index a812bb028c7f2..b5dfc3d809c13 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -39,7 +39,7 @@ pub trait ConsensusDataProvider: Send + Sync { /// Attempt to create a consensus digest. fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; - /// Set up the necessary import params. + /// set up the neccessary import params. fn append_block_import( &self, parent: &B::Header, diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 206f5163a13cd..cc73a3fa961ce 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -24,13 +24,14 @@ use crate::Error; use codec::Encode; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus_babe::{ - authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Epoch, INTERMEDIATE_KEY, + authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Config, Epoch, + INTERMEDIATE_KEY, }; use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; use sp_keystore::SyncCryptoStorePtr; -use std::{marker::PhantomData, sync::Arc}; +use std::{borrow::Cow, marker::PhantomData, sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; @@ -39,7 +40,7 @@ use sp_consensus::CacheKeyId; use sp_consensus_babe::{ digests::{NextEpochDescriptor, 
PreDigest, SecondaryPlainPreDigest}, inherents::BabeInherentData, - AuthorityId, BabeApi, BabeAuthorityWeight, BabeConfiguration, ConsensusLog, BABE_ENGINE_ID, + AuthorityId, BabeApi, BabeAuthorityWeight, ConsensusLog, BABE_ENGINE_ID, }; use sp_consensus_slots::Slot; use sp_inherents::InherentData; @@ -63,10 +64,7 @@ pub struct BabeConsensusDataProvider { epoch_changes: SharedEpochChanges, /// BABE config, gotten from the runtime. - /// NOTE: This is used to fetch `slot_duration` and `epoch_length` in the - /// `ConsensusDataProvider` implementation. Correct as far as these values - /// are not changed during an epoch change. - config: BabeConfiguration, + config: Config, /// Authorities to be used for this babe chain. authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, @@ -125,8 +123,10 @@ where // drop the lock drop(epoch_changes); - import_params - .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); + import_params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); Ok((import_params, None)) } @@ -152,7 +152,7 @@ where return Err(Error::StringError("Cannot supply empty authority set!".into())) } - let config = sc_consensus_babe::configuration(&*client)?; + let config = Config::get(&*client)?; Ok(Self { config, @@ -177,7 +177,9 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?; let epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(self.config.genesis_config(), slot) + }) .ok_or_else(|| { log::info!(target: "babe", "create_digest: no viable_epoch :("); sp_consensus::Error::InvalidAuthoritiesSet @@ -304,7 +306,7 @@ where identifier, EpochHeader { start_slot: slot, - end_slot: (*slot * self.config.epoch_length).into(), + end_slot: (*slot * self.config.genesis_config().epoch_length).into(), }, ), _ => unreachable!( @@ -313,7 +315,10 @@ where }; } - 
params.insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); + params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); Ok(()) } diff --git a/client/consensus/manual-seal/src/consensus/timestamp.rs b/client/consensus/manual-seal/src/consensus/timestamp.rs index f899b80d6c9af..e7f4e709ab996 100644 --- a/client/consensus/manual-seal/src/consensus/timestamp.rs +++ b/client/consensus/manual-seal/src/consensus/timestamp.rs @@ -46,10 +46,10 @@ use std::{ /// This works by either fetching the `slot_number` from the most recent header and dividing /// that value by `slot_duration` in order to fork chains that expect this inherent. /// -/// It produces timestamp inherents that are increased by `slot_duration` whenever +/// It produces timestamp inherents that are increaed by `slot_duraation` whenever /// `provide_inherent_data` is called. pub struct SlotTimestampProvider { - // holds the unix millisecond timestamp for the most recent block + // holds the unix millisecnd timestamp for the most recent block unix_millis: atomic::AtomicU64, // configured slot_duration in the runtime slot_duration: SlotDuration, @@ -63,7 +63,7 @@ impl SlotTimestampProvider { C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, C::Api: BabeApi, { - let slot_duration = sc_consensus_babe::configuration(&*client)?.slot_duration(); + let slot_duration = sc_consensus_babe::Config::get(&*client)?.slot_duration(); let time = Self::with_header(&client, slot_duration, |header| { let slot_number = *sc_consensus_babe::find_pre_digest::(&header) diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index cee4d59b6d6e5..d134ce7734571 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -20,7 +20,7 @@ use crate::rpc; use sc_client_api::backend::{Backend as ClientBackend, 
Finalizer}; -use sp_runtime::{traits::Block as BlockT, Justification}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification}; use std::{marker::PhantomData, sync::Arc}; /// params for block finalization. @@ -46,7 +46,7 @@ where { let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. } = params; - match finalizer.finalize_block(hash, justification, true) { + match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { Err(e) => { log::warn!("Failed to finalize block {}", e); rpc::send_result(&mut sender, Err(e.into())) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 09ab139b91c73..ba63666f3e46c 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -247,66 +247,13 @@ pub async fn run_instant_seal( .await } -/// Runs the background authorship task for the instant seal engine. -/// instant-seal creates a new block for every transaction imported into -/// the transaction pool. -/// -/// This function will finalize the block immediately as well. If you don't -/// want this behavior use `run_instant_seal` instead. -pub async fn run_instant_seal_and_finalize( - InstantSealParams { - block_import, - env, - client, - pool, - select_chain, - consensus_data_provider, - create_inherent_data_providers, - }: InstantSealParams, -) where - B: BlockT + 'static, - BI: BlockImport> - + Send - + Sync - + 'static, - C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Proposer: Proposer>, - SC: SelectChain + 'static, - TransactionFor: 'static, - TP: TransactionPool, - CIDP: CreateInherentDataProviders, - P: Send + Sync + 'static, -{ - // Creates and finalizes blocks as soon as transactions are imported - // into the transaction pool. 
- let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { - create_empty: false, - finalize: true, - parent_hash: None, - sender: None, - }); - - run_manual_seal(ManualSealParams { - block_import, - env, - client, - pool, - commands_stream, - select_chain, - consensus_data_provider, - create_inherent_data_providers, - }) - .await -} - #[cfg(test)] mod tests { use super::*; use sc_basic_authorship::ProposerFactory; + use sc_client_api::BlockBackend; use sc_consensus::ImportedAux; - use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; + use sc_transaction_pool::{BasicPool, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; use sp_inherents::InherentData; use sp_runtime::generic::{BlockId, Digest, DigestItem}; @@ -358,7 +305,6 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -367,8 +313,6 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, - genesis_hash, - genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as transactions are imported into the @@ -411,7 +355,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash, + hash: created_block.hash.clone(), aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -431,7 +375,6 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = 
Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -440,8 +383,6 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, - genesis_hash, - genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the @@ -481,7 +422,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash, + hash: created_block.hash.clone(), aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -510,13 +451,8 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let pool_api = Arc::new(FullChainApi::new( - client.clone(), - None, - &sp_core::testing::TaskExecutor::new(), - )); + let pool_api = api(); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -525,8 +461,6 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, - genesis_hash, - genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the @@ -562,12 +496,13 @@ mod tests { .await .unwrap(); let created_block = rx.await.unwrap().unwrap(); + pool_api.increment_nonce(Alice.into()); // assert that the background task returns ok assert_eq!( created_block, CreatedBlock { - hash: created_block.hash, + hash: created_block.hash.clone(), aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -577,7 +512,8 @@ mod tests { } } ); - + let block = client.block(&BlockId::Number(1)).unwrap().unwrap().block; + pool_api.add_block(block, true); assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 
1)).await.is_ok()); let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); @@ -598,6 +534,9 @@ mod tests { .await .is_ok()); assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); + let block = client.block(&BlockId::Number(2)).unwrap().unwrap().block; + pool_api.add_block(block, true); + pool_api.increment_nonce(Alice.into()); assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); @@ -621,7 +560,6 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -630,8 +568,6 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, - genesis_hash, - genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 4833786d2b990..7e9b43fac8a57 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parking_lot = "0.12.1" +parking_lot = "0.12.0" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index dcf069d617bab..6f9ee6f864ad8 100644 --- a/client/consensus/pow/src/lib.rs +++ 
b/client/consensus/pow/src/lib.rs @@ -56,7 +56,9 @@ use sc_consensus::{ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend}; -use sp_consensus::{Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{ + CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, +}; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_core::ExecutionContext; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; @@ -65,7 +67,10 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, RuntimeString, }; -use std::{cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; +use std::{ + borrow::Cow, cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, + time::Duration, +}; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -209,17 +214,18 @@ pub trait PowAlgorithm { } /// A block importer for PoW. 
-pub struct PowBlockImport { +pub struct PowBlockImport { algorithm: Algorithm, inner: I, select_chain: S, client: Arc, create_inherent_data_providers: Arc, check_inherents_after: <::Header as HeaderT>::Number, + can_author_with: CAW, } -impl Clone - for PowBlockImport +impl Clone + for PowBlockImport { fn clone(&self) -> Self { Self { @@ -229,11 +235,12 @@ impl Clone client: self.client.clone(), create_inherent_data_providers: self.create_inherent_data_providers.clone(), check_inherents_after: self.check_inherents_after, + can_author_with: self.can_author_with.clone(), } } } -impl PowBlockImport +impl PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -241,6 +248,7 @@ where C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, + CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { /// Create a new block import suitable to be used in PoW @@ -251,6 +259,7 @@ where check_inherents_after: <::Header as HeaderT>::Number, select_chain: S, create_inherent_data_providers: CIDP, + can_author_with: CAW, ) -> Self { Self { inner, @@ -259,6 +268,7 @@ where check_inherents_after, select_chain, create_inherent_data_providers: Arc::new(create_inherent_data_providers), + can_author_with, } } @@ -273,6 +283,16 @@ where return Ok(()) } + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "pow", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_data = inherent_data_providers .create_inherent_data() .map_err(|e| Error::CreateInherents(e))?; @@ -297,7 +317,8 @@ where } #[async_trait::async_trait] -impl BlockImport for PowBlockImport +impl BlockImport + for PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -307,6 +328,7 @@ where C::Api: BlockBuilderApi, Algorithm: PowAlgorithm + Send + Sync, Algorithm::Difficulty: 'static + Send, + CAW: CanAuthorWith + Send + Sync, CIDP: 
CreateInherentDataProviders + Send + Sync, { type Error = ConsensusError; @@ -338,25 +360,23 @@ where if let Some(inner_body) = block.body.take() { let check_block = B::new(block.header.clone(), inner_body); - if !block.state_action.skip_execution_checks() { - self.check_inherents( - check_block.clone(), - BlockId::Hash(parent_hash), - self.create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await?, - block.origin.into(), - ) - .await?; - } + self.check_inherents( + check_block.clone(), + BlockId::Hash(parent_hash), + self.create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await?, + block.origin.into(), + ) + .await?; block.body = Some(check_block.deconstruct().1); } let inner_seal = fetch_seal::(block.post_digests.last(), block.header.hash())?; - let intermediate = block - .remove_intermediate::>(INTERMEDIATE_KEY)?; + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; let difficulty = match intermediate.difficulty { Some(difficulty) => difficulty, @@ -452,7 +472,9 @@ where let intermediate = PowIntermediate:: { difficulty: None }; block.header = checked_header; block.post_digests.push(seal); - block.insert_intermediate(INTERMEDIATE_KEY, intermediate); + block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); block.post_hash = Some(hash); Ok((block, None)) @@ -490,18 +512,19 @@ where /// /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. This can encode authorship information, or just be a graffiti. 
-pub fn start_mining_worker( +pub fn start_mining_worker( block_import: BoxBlockImport>, client: Arc, select_chain: S, algorithm: Algorithm, mut env: E, - sync_oracle: SO, + mut sync_oracle: SO, justification_sync_link: L, pre_runtime: Option>, create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, + can_author_with: CAW, ) -> ( MiningHandle>::Proof>, impl Future, @@ -518,6 +541,7 @@ where SO: SyncOracle + Clone + Send + Sync + 'static, L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders, + CAW: CanAuthorWith + Clone + Send + 'static, { let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); let worker = MiningHandle::new(algorithm.clone(), block_import, justification_sync_link); @@ -549,6 +573,16 @@ where }; let best_hash = best_header.hash(); + if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { + warn!( + target: "pow", + "Skipping proposal `can_author_with` returned: {} \ + Probably a node update is required!", + err, + ); + continue + } + if worker.best_hash() == Some(best_hash) { continue } diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index a00da6e7022fb..750e78cd9a038 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -32,6 +32,7 @@ use sp_runtime::{ DigestItem, }; use std::{ + borrow::Cow, collections::HashMap, pin::Pin, sync::{ @@ -211,7 +212,10 @@ where let intermediate = PowIntermediate:: { difficulty: Some(build.metadata.difficulty), }; - import_block.insert_intermediate(INTERMEDIATE_KEY, intermediate); + + import_block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); let header = import_block.post_header(); let mut block_import = self.block_import.lock(); diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index fae499ad7c7c6..41a6c1ad5e641 100644 --- a/client/consensus/slots/Cargo.toml +++ 
b/client/consensus/slots/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" futures-timer = "3.0.1" @@ -31,6 +31,7 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 90bfef6c1609c..7c3de13444c1a 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -38,10 +38,14 @@ use log::{debug, info, warn}; use sc_consensus::{BlockImport, JustificationSyncLink}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{CanAuthorWith, Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; -use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, Header as HeaderT}, +}; +use sp_timestamp::Timestamp; use std::{fmt::Debug, ops::Deref, time::Duration}; /// The changes that need to applied to the storage to create the state for a block. @@ -101,8 +105,8 @@ pub trait SimpleSlotWorker { /// Data associated with a slot claim. 
type Claim: Send + Sync + 'static; - /// Auxiliary data necessary for authoring. - type AuxData: Send + Sync + 'static; + /// Epoch data necessary for authoring. + type EpochData: Send + Sync + 'static; /// The logging target to use when logging messages. fn logging_target(&self) -> &'static str; @@ -110,28 +114,29 @@ pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. fn block_import(&mut self) -> &mut Self::BlockImport; - /// Returns the auxiliary data necessary for authoring. - fn aux_data( + /// Returns the epoch data necessary for authoring. For time-dependent epochs, + /// use the provided slot number as a canonical source of time. + fn epoch_data( &self, header: &B::Header, slot: Slot, - ) -> Result; + ) -> Result; - /// Returns the number of authorities. + /// Returns the number of authorities given the epoch data. /// None indicate that the authorities information is incomplete. - fn authorities_len(&self, aux_data: &Self::AuxData) -> Option; + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. async fn claim_slot( &self, header: &B::Header, slot: Slot, - aux_data: &Self::AuxData, + epoch_data: &Self::EpochData, ) -> Option; /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we /// need to author blocks or not. - fn notify_slot(&self, _header: &B::Header, _slot: Slot, _aux_data: &Self::AuxData) {} + fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} /// Return the pre digest data to include in a block authored with the given claim. 
fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec; @@ -144,7 +149,7 @@ pub trait SimpleSlotWorker { body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - epoch: Self::AuxData, + epoch: Self::EpochData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -250,7 +255,7 @@ pub trait SimpleSlotWorker { where Self: Sync, { - let slot = slot_info.slot; + let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); let telemetry = self.telemetry(); let logging_target = self.logging_target(); @@ -267,12 +272,12 @@ pub trait SimpleSlotWorker { Delay::new(proposing_remaining_duration) }; - let aux_data = match self.aux_data(&slot_info.chain_head, slot) { - Ok(aux_data) => aux_data, + let epoch_data = match self.epoch_data(&slot_info.chain_head, slot) { + Ok(epoch_data) => epoch_data, Err(err) => { warn!( target: logging_target, - "Unable to fetch auxiliary data for block {:?}: {}", + "Unable to fetch epoch data at block {:?}: {}", slot_info.chain_head.hash(), err, ); @@ -289,9 +294,9 @@ pub trait SimpleSlotWorker { }, }; - self.notify_slot(&slot_info.chain_head, slot, &aux_data); + self.notify_slot(&slot_info.chain_head, slot, &epoch_data); - let authorities_len = self.authorities_len(&aux_data); + let authorities_len = self.authorities_len(&epoch_data); if !self.force_authoring() && self.sync_oracle().is_offline() && @@ -308,20 +313,29 @@ pub trait SimpleSlotWorker { return None } - let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?; + let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data).await?; if self.should_backoff(slot, &slot_info.chain_head) { return None } - debug!(target: logging_target, "Starting authorship at slot: {slot}"); + debug!( + target: logging_target, + "Starting authorship at slot {}; timestamp = {}", slot, *timestamp, + ); - telemetry!(telemetry; CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => slot); + telemetry!( + 
telemetry; + CONSENSUS_DEBUG; + "slots.starting_authorship"; + "slot_num" => *slot, + "timestamp" => *timestamp, + ); let proposer = match self.proposer(&slot_info.chain_head).await { Ok(p) => p, Err(err) => { - warn!(target: logging_target, "Unable to author block in slot {slot:?}: {err}"); + warn!(target: logging_target, "Unable to author block in slot {:?}: {}", slot, err,); telemetry!( telemetry; @@ -350,7 +364,7 @@ pub trait SimpleSlotWorker { body.clone(), proposal.storage_changes, claim, - aux_data, + epoch_data, ) .await { @@ -429,46 +443,56 @@ impl + Send + Sync, B: BlockT> /// Slot specific extension that the inherent data provider needs to implement. pub trait InherentDataProviderExt { + /// The current timestamp that will be found in the + /// [`InherentData`](`sp_inherents::InherentData`). + fn timestamp(&self) -> Timestamp; + /// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`). fn slot(&self) -> Slot; } /// Small macro for implementing `InherentDataProviderExt` for inherent data provider tuple. macro_rules! impl_inherent_data_provider_ext_tuple { - ( S $(, $TN:ident)* $( , )?) => { - impl InherentDataProviderExt for (S, $($TN),*) + ( T, S $(, $TN:ident)* $( , )?) 
=> { + impl InherentDataProviderExt for (T, S, $($TN),*) where + T: Deref, S: Deref, { - fn slot(&self) -> Slot { + fn timestamp(&self) -> Timestamp { *self.0.deref() } + + fn slot(&self) -> Slot { + *self.1.deref() + } } } } -impl_inherent_data_provider_ext_tuple!(S); -impl_inherent_data_provider_ext_tuple!(S, A); -impl_inherent_data_provider_ext_tuple!(S, A, B); -impl_inherent_data_provider_ext_tuple!(S, A, B, C); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I); -impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J); +impl_inherent_data_provider_ext_tuple!(T, S); +impl_inherent_data_provider_ext_tuple!(T, S, A); +impl_inherent_data_provider_ext_tuple!(T, S, A, B); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I, J); /// Start a new slot worker. /// /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. 
-pub async fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, - sync_oracle: SO, + mut sync_oracle: SO, create_inherent_data_providers: CIDP, + can_author_with: CAW, ) where B: BlockT, C: SelectChain, @@ -476,6 +500,7 @@ pub async fn start_slot_worker( SO: SyncOracle + Send, CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send, { let mut slots = Slots::new(slot_duration.as_duration(), create_inherent_data_providers, client); @@ -493,7 +518,19 @@ pub async fn start_slot_worker( continue } - let _ = worker.on_slot(slot_info).await; + if let Err(err) = + can_author_with.can_author_with(&BlockId::Hash(slot_info.chain_head.hash())) + { + warn!( + target: "slots", + "Unable to author block in slot {},. `can_author_with` returned: {} \ + Probably a node update is required!", + slot_info.slot, + err, + ); + } else { + let _ = worker.on_slot(slot_info).await; + } } } @@ -786,6 +823,7 @@ mod test { super::slots::SlotInfo { slot: slot.into(), duration: SLOT_DURATION, + timestamp: Default::default(), inherent_data: Default::default(), ends_at: Instant::now() + SLOT_DURATION, chain_head: Header::new( @@ -1118,7 +1156,7 @@ mod test { // But lets assert all distances, which we expect to grow linearly until `max_interval + 1` let expected_intervals: Vec<_> = - (0..497).map(|i| (i / 2).clamp(1, expected_distance)).collect(); + (0..497).map(|i| (i / 2).max(1).min(expected_distance)).collect(); assert_eq!(intervals, expected_intervals); } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index f3dc485a8e819..accf24b6b4e78 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -50,6 +50,8 @@ pub fn time_until_next_slot(slot_duration: Duration) -> Duration { pub struct SlotInfo { /// The slot number as found in the inherent data. 
pub slot: Slot, + /// Current timestamp as found in the inherent data. + pub timestamp: sp_timestamp::Timestamp, /// The instant at which the slot ends. pub ends_at: Instant, /// The inherent data. @@ -70,6 +72,7 @@ impl SlotInfo { /// `ends_at` is calculated using `timestamp` and `duration`. pub fn new( slot: Slot, + timestamp: sp_timestamp::Timestamp, inherent_data: InherentData, duration: Duration, chain_head: B::Header, @@ -77,6 +80,7 @@ impl SlotInfo { ) -> Self { Self { slot, + timestamp, inherent_data, duration, chain_head, @@ -171,6 +175,7 @@ where ); } + let timestamp = inherent_data_providers.timestamp(); let slot = inherent_data_providers.slot(); let inherent_data = inherent_data_providers.create_inherent_data()?; @@ -178,7 +183,14 @@ where if slot > self.last_slot { self.last_slot = slot; - break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)) + break Ok(SlotInfo::new( + slot, + timestamp, + inherent_data, + self.slot_duration, + chain_head, + None, + )) } } } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index c12bf933f6bb1..3b6402b3f6023 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -17,13 +17,13 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } hash-db = "0.15.2" -kvdb = "0.12.0" -kvdb-memorydb = "0.12.0" -kvdb-rocksdb = { version = "0.16.0", optional = true } +kvdb = "0.11.0" +kvdb-memorydb = "0.11.0" +kvdb-rocksdb = { version = "0.15.2", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" -parity-db = "0.4.2" -parking_lot = "0.12.1" +parity-db = "0.3.13" +parking_lot = "0.12.0" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } sp-arithmetic = { version = "5.0.0", path = "../../primitives/arithmetic" } @@ -35,12 +35,9 @@ sp-state-machine = { version = "0.12.0", path = "../../primitives/state-machine" sp-trie = { version = "6.0.0", path = "../../primitives/trie" } 
[dev-dependencies] -criterion = "0.3.3" -kvdb-rocksdb = "0.16.0" -rand = "0.8.4" -tempfile = "3.1.0" +kvdb-rocksdb = "0.15.1" quickcheck = { version = "1.0.3", default-features = false } -kitchensink-runtime = { path = "../../bin/node/runtime" } +tempfile = "3" sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } @@ -49,10 +46,3 @@ default = [] test-helpers = [] runtime-benchmarks = [] rocksdb = ["kvdb-rocksdb"] - -[[bench]] -name = "state_access" -harness = false - -[lib] -bench = false diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs deleted file mode 100644 index bab79fe7c90db..0000000000000 --- a/client/db/benches/state_access.rs +++ /dev/null @@ -1,311 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; -use rand::{distributions::Uniform, rngs::StdRng, Rng, SeedableRng}; -use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBackend}; -use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; -use sp_core::H256; -use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, - StateVersion, Storage, -}; -use tempfile::TempDir; - -pub(crate) type Block = RawBlock>; - -fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { - let mut op = db.begin_operation().unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - header.state_root = op - .set_genesis_state( - Storage { - top: vec![( - sp_core::storage::well_known_keys::CODE.to_vec(), - kitchensink_runtime::wasm_binary_unwrap().to_vec(), - )] - .into_iter() - .collect(), - children_default: Default::default(), - }, - true, - StateVersion::V1, - ) - .unwrap(); - - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - db.commit_operation(op).unwrap(); - - let mut number = 1; - let mut parent_hash = header.hash(); - - for i in 0..10 { - let mut op = db.begin_operation().unwrap(); - - db.begin_state_operation(&mut op, parent_hash).unwrap(); - - let mut header = Header { - number, - parent_hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let changes = storage - .iter() - .skip(i * 100_000) - .take(100_000) - .map(|(k, v)| (k.clone(), Some(v.clone()))) - .collect::>(); - - let (state_root, tx) = db.state_at(parent_hash).unwrap().storage_root( - changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), - StateVersion::V1, - ); - header.state_root = state_root; - - op.update_db_storage(tx).unwrap(); - 
op.update_storage(changes.clone(), Default::default()).unwrap(); - - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) - .unwrap(); - - db.commit_operation(op).unwrap(); - - number += 1; - parent_hash = header.hash(); - } - - parent_hash -} - -enum BenchmarkConfig { - NoCache, - TrieNodeCache, -} - -fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend { - let path = temp_dir.path().to_owned(); - - let trie_cache_maximum_size = match config { - BenchmarkConfig::NoCache => None, - BenchmarkConfig::TrieNodeCache => Some(2 * 1024 * 1024 * 1024), - }; - - let settings = DatabaseSettings { - trie_cache_maximum_size, - state_pruning: Some(PruningMode::ArchiveAll), - source: DatabaseSource::ParityDb { path }, - blocks_pruning: BlocksPruning::KeepAll, - }; - - Backend::new(settings, 100).expect("Creates backend") -} - -/// Generate the storage that will be used for the benchmark -/// -/// Returns the `Vec` and the `Vec<(key, value)>` -fn generate_storage() -> (Vec>, Vec<(Vec, Vec)>) { - let mut rng = StdRng::seed_from_u64(353893213); - - let mut storage = Vec::new(); - let mut keys = Vec::new(); - - for _ in 0..1_000_000 { - let key_len: usize = rng.gen_range(32..128); - let key = (&mut rng) - .sample_iter(Uniform::new_inclusive(0, 255)) - .take(key_len) - .collect::>(); - - let value_len: usize = rng.gen_range(20..60); - let value = (&mut rng) - .sample_iter(Uniform::new_inclusive(0, 255)) - .take(value_len) - .collect::>(); - - keys.push(key.clone()); - storage.push((key, value)); - } - - (keys, storage) -} - -fn state_access_benchmarks(c: &mut Criterion) { - sp_tracing::try_init_simple(); - - let (keys, storage) = generate_storage(); - let path = TempDir::new().expect("Creates temporary directory"); - - let block_hash = { - let backend = create_backend(BenchmarkConfig::NoCache, &path); - insert_blocks(&backend, storage.clone()) - }; - - let mut group = c.benchmark_group("Reading entire state"); - group.sample_size(20); 
- - let mut bench_multiple_values = |config, desc, multiplier| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - for key in keys.iter().cycle().take(keys.len() * multiplier) { - let _ = state.storage(&key).expect("Doesn't fail").unwrap(); - } - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_multiple_values( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading each key once", - 1, - ); - bench_multiple_values(BenchmarkConfig::NoCache, "no cache and reading each key once", 1); - - bench_multiple_values( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading 4 times each key in a row", - 4, - ); - bench_multiple_values( - BenchmarkConfig::NoCache, - "no cache and reading 4 times each key in a row", - 4, - ); - - group.finish(); - - let mut group = c.benchmark_group("Reading a single value"); - - let mut bench_single_value = |config, desc, multiplier| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - for key in keys.iter().take(1).cycle().take(multiplier) { - let _ = state.storage(&key).expect("Doesn't fail").unwrap(); - } - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading the key once", - 1, - ); - bench_single_value(BenchmarkConfig::NoCache, "no cache and reading the key once", 1); - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and reading 4 times each key in a row", - 4, - ); - bench_single_value( - BenchmarkConfig::NoCache, - "no cache and reading 4 times each key in a row", - 4, - ); - - group.finish(); - - let mut group = c.benchmark_group("Hashing a value"); - - let mut bench_single_value = |config, desc, multiplier| { - let backend = 
create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - for key in keys.iter().take(1).cycle().take(multiplier) { - let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); - } - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and hashing the key once", - 1, - ); - bench_single_value(BenchmarkConfig::NoCache, "no cache and hashing the key once", 1); - - bench_single_value( - BenchmarkConfig::TrieNodeCache, - "with trie node cache and hashing 4 times each key in a row", - 4, - ); - bench_single_value( - BenchmarkConfig::NoCache, - "no cache and hashing 4 times each key in a row", - 4, - ); - - group.finish(); - - let mut group = c.benchmark_group("Hashing `:code`"); - - let mut bench_single_value = |config, desc| { - let backend = create_backend(config, &path); - - group.bench_function(desc, |b| { - b.iter_batched( - || backend.state_at(block_hash).expect("Creates state"), - |state| { - let _ = state - .storage_hash(sp_core::storage::well_known_keys::CODE) - .expect("Doesn't fail") - .unwrap(); - }, - BatchSize::SmallInput, - ) - }); - }; - - bench_single_value(BenchmarkConfig::TrieNodeCache, "with trie node cache"); - bench_single_value(BenchmarkConfig::NoCache, "no cache"); - - group.finish(); -} - -criterion_group!(benches, state_access_benchmarks); -criterion_main!(benches); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 13d91fff0b555..d3d43e742d026 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -18,7 +18,13 @@ //! 
State backend that's useful for benchmarking -use crate::{DbState, DbStateBuilder}; +use std::{ + cell::{Cell, RefCell}, + collections::HashMap, + sync::Arc, +}; + +use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; use hash_db::{Hasher, Prefix}; use kvdb::{DBTransaction, KeyValueDB}; use linked_hash_map::LinkedHashMap; @@ -31,31 +37,40 @@ use sp_runtime::{ StateVersion, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, StorageCollection, -}; -use sp_trie::{ - cache::{CacheSize, SharedTrieCache}, - prefixed_key, MemoryDB, -}; -use std::{ - cell::{Cell, RefCell}, - collections::HashMap, - sync::Arc, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, + StorageCollection, }; +use sp_trie::{prefixed_key, MemoryDB}; + +type DbState = + sp_state_machine::TrieBackend>>, HashFor>; -type State = DbState; +type State = CachingState, B>; struct StorageDb { db: Arc, + proof_recorder: Option>, _block: std::marker::PhantomData, } impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); - self.db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e)) + if let Some(recorder) = &self.proof_recorder { + if let Some(v) = recorder.get(key) { + return Ok(v) + } + let backend_value = self + .db + .get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e))?; + recorder.record(*key, backend_value.clone()); + Ok(backend_value) + } else { + self.db + .get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } } @@ -67,6 +82,7 @@ pub struct BenchmarkingState { db: Cell>>, genesis: HashMap, (Vec, i32)>, record: Cell>>, + shared_cache: SharedCache, // shared cache is always empty /// Key tracker for keys in the main trie. 
/// We track the total number of reads and writes to these keys, /// not de-duplicated for repeats. @@ -77,10 +93,9 @@ pub struct BenchmarkingState { /// not de-duplicated for repeats. child_key_tracker: RefCell, LinkedHashMap, TrackedStorageKey>>>, whitelist: RefCell>, - proof_recorder: Option>>, + proof_recorder: Option>, proof_recorder_root: Cell, enable_tracking: bool, - shared_trie_cache: SharedTrieCache>, } impl BenchmarkingState { @@ -94,7 +109,7 @@ impl BenchmarkingState { let state_version = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); - sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); + sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); let mut state = BenchmarkingState { state: RefCell::new(None), @@ -103,20 +118,19 @@ impl BenchmarkingState { genesis: Default::default(), genesis_root: Default::default(), record: Default::default(), + shared_cache: new_shared_cache(0, (1, 10)), main_key_tracker: Default::default(), child_key_tracker: Default::default(), whitelist: Default::default(), proof_recorder: record_proof.then(Default::default), proof_recorder_root: Cell::new(root), enable_tracking, - // Enable the cache, but do not sync anything to the shared state. 
- shared_trie_cache: SharedTrieCache::new(CacheSize::Maximum(0)), }; state.add_whitelist_to_tracker(); state.reopen()?; - let child_delta = genesis.children_default.values().map(|child_content| { + let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), @@ -146,13 +160,16 @@ impl BenchmarkingState { recorder.reset(); self.proof_recorder_root.set(self.root.get()); } - let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); - *self.state.borrow_mut() = Some( - DbStateBuilder::::new(storage_db, self.root.get()) - .with_optional_recorder(self.proof_recorder.clone()) - .with_cache(self.shared_trie_cache.local_cache()) - .build(), - ); + let storage_db = Arc::new(StorageDb:: { + db, + proof_recorder: self.proof_recorder.clone(), + _block: Default::default(), + }); + *self.state.borrow_mut() = Some(State::new( + DbState::::new(storage_db, self.root.get()), + self.shared_cache.clone(), + None, + )); Ok(()) } @@ -307,19 +324,6 @@ impl StateBackend> for BenchmarkingState { .child_storage(child_info, key) } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.add_read_key(Some(child_info.storage_key()), key); - self.state - .borrow() - .as_ref() - .ok_or_else(state_err)? 
- .child_storage_hash(child_info, key) - } - fn exists_storage(&self, key: &[u8]) -> Result { self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) @@ -600,25 +604,22 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| { let proof_size = recorder.estimate_encoded_size() as u32; - let proof = recorder.to_storage_proof(); - let proof_recorder_root = self.proof_recorder_root.get(); if proof_recorder_root == Default::default() || proof_size == 1 { // empty trie proof_size + } else if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) + { + size as u32 } else { - if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { - size as u32 - } else { - panic!( - "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", - self.proof_recorder_root.get(), - self.root.get(), - self.genesis_root, - proof_size, - ); - } + panic!( + "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", + self.proof_recorder_root.get(), + self.root.get(), + self.genesis_root, + proof_size, + ); } }) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3bbff1625f2f9..7dd49f9831f1c 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -34,8 +34,8 @@ pub mod bench; mod children; mod parity_db; -mod record_stats_state; mod stats; +mod storage_cache; #[cfg(any(feature = "rocksdb", test))] mod upgrade; mod utils; @@ -51,19 +51,19 @@ use std::{ }; use crate::{ - record_stats_state::RecordStatsState, stats::StateUsageStats, + storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, }; use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ backend::NewBlockState, - leaves::{FinalizationOutcome, LeafSet}, + leaves::{FinalizationDisplaced, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, }; -use 
sc_state_db::{IsPruned, StateDb}; +use sc_state_db::StateDb; use sp_arithmetic::traits::Saturating; use sp_blockchain::{ well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, @@ -83,11 +83,10 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - backend::{AsTrieBackend, Backend as StateBackend}, - ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, - StorageCollection, UsageInfo as StateUsageInfo, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, IndexOperation, + OffchainChangesCollection, StateMachineStats, StorageCollection, UsageInfo as StateUsageInfo, }; -use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; +use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; @@ -97,16 +96,13 @@ pub use bench::BenchmarkingState; const CACHE_HEADERS: usize = 8; +/// Default value for storage cache child ratio. +const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); + /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend>>, HashFor>; -/// Builder for [`DbState`]. -pub type DbStateBuilder = sp_state_machine::TrieBackendBuilder< - Arc>>, - HashFor, ->; - /// Length of a [`DbHash`]. 
const DB_HASH_LEN: usize = 32; @@ -178,14 +174,6 @@ impl StateBackend> for RefTrackingState { self.state.child_storage(child_info, key) } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.state.child_storage_hash(child_info, key) - } - fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } @@ -284,6 +272,12 @@ impl StateBackend> for RefTrackingState { self.state.child_keys(child_info, prefix) } + fn as_trie_backend( + &self, + ) -> Option<&sp_state_machine::TrieBackend>> { + self.state.as_trie_backend() + } + fn register_overlay_stats(&self, stats: &StateMachineStats) { self.state.register_overlay_stats(stats); } @@ -293,22 +287,12 @@ impl StateBackend> for RefTrackingState { } } -impl AsTrieBackend> for RefTrackingState { - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; - - fn as_trie_backend( - &self, - ) -> &sp_state_machine::TrieBackend> { - &self.state.as_trie_backend() - } -} - /// Database settings. pub struct DatabaseSettings { - /// The maximum trie cache size in bytes. - /// - /// If `None` is given, the cache is disabled. - pub trie_cache_maximum_size: Option, + /// State cache size. + pub state_cache_size: usize, + /// Ratio of cache size dedicated to child tries. + pub state_cache_child_ratio: Option<(usize, usize)>, /// Requested state pruning mode. pub state_pruning: Option, /// Where to find the database. @@ -316,16 +300,14 @@ pub struct DatabaseSettings { /// Block pruning mode. /// /// NOTE: only finalized blocks are subject for removal! - pub blocks_pruning: BlocksPruning, + pub keep_blocks: KeepBlocks, } /// Block pruning settings. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum BlocksPruning { - /// Keep full block history, of every block that was ever imported. - KeepAll, - /// Keep full finalized block history. - KeepFinalized, +#[derive(Debug, Clone, Copy)] +pub enum KeepBlocks { + /// Keep full block history. 
+ All, /// Keep N recent finalized blocks. Some(u32), } @@ -444,10 +426,9 @@ struct PendingBlock { } // wrapper that implements trait required for state_db -#[derive(Clone)] -struct StateMetaDb(Arc>); +struct StateMetaDb<'a>(&'a dyn Database); -impl sc_state_db::MetaDb for StateMetaDb { +impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { type Error = sp_database::error::DatabaseError; fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { @@ -577,10 +558,8 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } impl sc_client_api::blockchain::Backend for BlockchainDb { - fn body(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(body) = - read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(hash))? - { + fn body(&self, id: BlockId) -> ClientResult>> { + if let Some(body) = read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { // Plain body match Decode::decode(&mut &body[..]) { Ok(body) => return Ok(Some(body)), @@ -592,12 +571,7 @@ impl sc_client_api::blockchain::Backend for BlockchainDb(hash), - )? { + if let Some(index) = read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? { match Vec::>::decode(&mut &index[..]) { Ok(index) => { let mut body = Vec::new(); @@ -642,13 +616,8 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult> { - match read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::JUSTIFICATIONS, - BlockId::::Hash(hash), - )? { + fn justifications(&self, id: BlockId) -> ClientResult> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, id)? 
{ Some(justifications) => match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), Err(err) => @@ -686,21 +655,16 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult>> { + fn indexed_transaction(&self, hash: &Block::Hash) -> ClientResult>> { Ok(self.db.get(columns::TRANSACTION, hash.as_ref())) } - fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult { + fn has_indexed_transaction(&self, hash: &Block::Hash) -> ClientResult { Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) } - fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult>>> { - let body = match read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::BODY_INDEX, - BlockId::::Hash(hash), - )? { + fn block_indexed_body(&self, id: BlockId) -> ClientResult>>> { + let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? { Some(body) => body, None => return Ok(None), }; @@ -766,15 +730,15 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { - old_state: RecordStatsState, Block>, + old_state: SyncingCachingState, Block>, db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(Block::Hash, Option)>, - set_head: Option, + finalized_blocks: Vec<(BlockId, Option)>, + set_head: Option>, commit_state: bool, index_ops: Vec, } @@ -815,7 +779,7 @@ impl BlockImportOperation { return Err(sp_blockchain::Error::InvalidState) } - let child_delta = storage.children_default.values().map(|child_content| { + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), @@ -836,7 +800,7 @@ impl BlockImportOperation { impl 
sc_client_api::backend::BlockImportOperation for BlockImportOperation { - type State = RecordStatsState, Block>; + type State = SyncingCachingState, Block>; fn state(&self) -> ClientResult> { Ok(Some(&self.old_state)) @@ -914,16 +878,16 @@ impl sc_client_api::backend::BlockImportOperation fn mark_finalized( &mut self, - block: Block::Hash, + block: BlockId, justification: Option, ) -> ClientResult<()> { self.finalized_blocks.push((block, justification)); Ok(()) } - fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> { + fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(hash); + self.set_head = Some(block); Ok(()) } @@ -935,7 +899,7 @@ impl sc_client_api::backend::BlockImportOperation struct StorageDb { pub db: Arc>, - pub state_db: StateDb, StateMetaDb>, + pub state_db: StateDb>, prefix_keys: bool, } @@ -985,7 +949,7 @@ impl EmptyStorage { let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); // both triedbmut are the same on empty storage. - sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); + sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); EmptyStorage(root) } } @@ -1045,13 +1009,13 @@ pub struct Backend { offchain_storage: offchain::LocalStorage, blockchain: BlockchainDb, canonicalization_delay: u64, + shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, - blocks_pruning: BlocksPruning, + keep_blocks: KeepBlocks, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, - shared_trie_cache: Option>>, } impl Backend { @@ -1079,28 +1043,21 @@ impl Backend { /// Create new memory-backed client backend for tests. 
#[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) + pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { + Self::new_test_with_tx_storage(keep_blocks, canonicalization_delay) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage( - blocks_pruning: BlocksPruning, - canonicalization_delay: u64, - ) -> Self { + pub fn new_test_with_tx_storage(keep_blocks: u32, canonicalization_delay: u64) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); - let state_pruning = match blocks_pruning { - BlocksPruning::KeepAll => PruningMode::ArchiveAll, - BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical, - BlocksPruning::Some(n) => PruningMode::blocks_pruning(n), - }; let db_setting = DatabaseSettings { - trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(state_pruning), + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + state_pruning: Some(PruningMode::keep_blocks(keep_blocks)), source: DatabaseSource::Custom { db, require_create_flag: true }, - blocks_pruning, + keep_blocks: KeepBlocks::Some(keep_blocks), }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1132,11 +1089,11 @@ impl Backend { let mut db_init_transaction = Transaction::new(); let requested_state_pruning = config.state_pruning.clone(); - let state_meta_db = StateMetaDb(db.clone()); + let state_meta_db = StateMetaDb(db.as_ref()); let map_e = sp_blockchain::Error::from_state_db; let (state_db_init_commit_set, state_db) = StateDb::open( - state_meta_db, + &state_meta_db, requested_state_pruning, !db.supports_ref_counting(), should_init, @@ -1159,15 +1116,16 @@ impl Backend { offchain_storage, blockchain, 
canonicalization_delay, + shared_cache: new_shared_cache( + config.state_cache_size, + config.state_cache_child_ratio.unwrap_or(DEFAULT_CHILD_RATIO), + ), import_lock: Default::default(), is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), - blocks_pruning: config.blocks_pruning, + keep_blocks: config.keep_blocks, genesis_state: RwLock::new(None), - shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| { - SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size)) - }), }; // Older DB versions have no last state key. Check if the state is available and set it. @@ -1176,7 +1134,7 @@ impl Backend { info.finalized_hash != Default::default() && sc_client_api::Backend::have_state_at( &backend, - info.finalized_hash, + &info.finalized_hash, info.finalized_number, ) { backend.blockchain.update_meta(MetaUpdate { @@ -1236,7 +1194,7 @@ impl Backend { (&r.number, &r.hash) ); - return Err(sp_blockchain::Error::NotInFinalizedChain) + return Err(::sp_blockchain::Error::NotInFinalizedChain) } retracted.push(r.hash); @@ -1289,18 +1247,18 @@ impl Backend { fn finalize_block_with_transaction( &self, transaction: &mut Transaction, - hash: Block::Hash, + hash: &Block::Hash, header: &Block::Header, last_finalized: Option, justification: Option, - finalization_displaced: &mut Option>>, + finalization_displaced: &mut Option>>, ) -> ClientResult> { // TODO: ensure best chain contains this block. 
let number = *header.number(); self.ensure_sequential_finalization(header, last_finalized)?; let with_state = sc_client_api::Backend::have_state_at(self, hash, number); - self.note_finalized(transaction, header, hash, finalization_displaced, with_state)?; + self.note_finalized(transaction, header, *hash, finalization_displaced, with_state)?; if let Some(justification) = justification { transaction.set_from_vec( @@ -1309,7 +1267,7 @@ impl Backend { Justifications::from(justification).encode(), ); } - Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state }) + Ok(MetaUpdate { hash: *hash, number, is_best: false, is_finalized: true, with_state }) } // performs forced canonicalization with a delay after importing a non-finalized block. @@ -1340,16 +1298,15 @@ impl Backend { )) })? }; - if !sc_client_api::Backend::have_state_at(self, hash, new_canonical.saturated_into()) { + if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) { return Ok(()) } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; + let commit = + self.storage.state_db.canonicalize_block(&hash).map_err( + sp_blockchain::Error::from_state_db::>, + )?; apply_state_commit(transaction, commit); } Ok(()) @@ -1368,11 +1325,12 @@ impl Backend { (meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap) }; - for (block_hash, justification) in operation.finalized_blocks { + for (block, justification) in operation.finalized_blocks { + let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, - block_hash, + &block_hash, &block_header, Some(last_finalized_hash), justification, @@ -1387,21 +1345,16 @@ impl Backend { 
let parent_hash = *pending_block.header.parent_hash(); let number = *pending_block.header.number(); - let highest_leaf = self - .blockchain - .leaves - .read() - .highest_leaf() - .map(|(n, _)| n) - .unwrap_or(Zero::zero()); let existing_header = - number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some(); + number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; - if pending_block.leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; + let (enacted, retracted) = if pending_block.leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? + } else { + (Default::default(), Default::default()) }; utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; @@ -1499,16 +1452,14 @@ impl Backend { .storage .state_db .insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset) - .map_err(|e: sc_state_db::Error| { + .map_err(|e: sc_state_db::Error| { sp_blockchain::Error::from_state_db(e) })?; apply_state_commit(&mut transaction, commit); if number <= last_finalized_num { // Canonicalize in the db when re-importing existing blocks with state. 
let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, + sp_blockchain::Error::from_state_db::>, )?; apply_state_commit(&mut transaction, commit); meta_updates.push(MetaUpdate { @@ -1530,22 +1481,14 @@ impl Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); - debug!( - target: "db", + debug!(target: "db", "DB Commit {:?} ({}), best={}, state={}, existing={}, finalized={}", - hash, - number, - is_best, - operation.commit_state, - existing_header, - finalized, + hash, number, is_best, operation.commit_state, existing_header, finalized, ); self.state_usage.merge_sm(operation.old_state.usage_info()); - // release state reference so that it can be finalized - // VERY IMPORTANT - drop(operation.old_state); + let cache = operation.old_state.into_cache_changes(); if finalized { // TODO: ensure best chain contains this block. @@ -1634,21 +1577,20 @@ impl Backend { is_finalized: finalized, with_state: operation.commit_state, }); - Some((pending_block.header, hash)) + Some((pending_block.header, number, hash, enacted, retracted, is_best, cache)) } else { None }; - if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client_api::blockchain::HeaderBackend::header( - &self.blockchain, - BlockId::Hash(set_head), - )? { + let cache_update = if let Some(set_head) = operation.set_head { + if let Some(header) = + sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
+ { let number = header.number(); let hash = header.hash(); - self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; - + let (enacted, retracted) = + self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; meta_updates.push(MetaUpdate { hash, number: *number, @@ -1656,24 +1598,40 @@ impl Backend { is_finalized: false, with_state: false, }); + Some((enacted, retracted)) } else { return Err(sp_blockchain::Error::UnknownBlock(format!( "Cannot set head {:?}", set_head ))) } - } + } else { + None + }; self.storage.db.commit(transaction)?; // Apply all in-memory state changes. // Code beyond this point can't fail. - if let Some((header, hash)) = imported { + if let Some((header, number, hash, enacted, retracted, is_best, mut cache)) = imported { trace!(target: "db", "DB Commit done {:?}", hash); let header_metadata = CachedHeaderMetadata::from(&header); self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata); cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); + cache.sync_cache( + &enacted, + &retracted, + operation.storage_updates, + operation.child_storage_updates, + Some(hash), + Some(number), + is_best, + ); + } + + if let Some((enacted, retracted)) = cache_update { + self.shared_cache.write().sync(&enacted, &retracted); } for m in meta_updates { @@ -1692,7 +1650,7 @@ impl Backend { transaction: &mut Transaction, f_header: &Block::Header, f_hash: Block::Hash, - displaced: &mut Option>>, + displaced: &mut Option>>, with_state: bool, ) -> ClientResult<()> { let f_num = *f_header.number(); @@ -1703,18 +1661,17 @@ impl Backend { } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && + if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) && self.storage .state_db .best_canonical() .map(|c| f_num.saturated_into::() > c) .unwrap_or(true) { - let commit = 
self.storage.state_db.canonicalize_block(&f_hash).map_err( - sp_blockchain::Error::from_state_db::< - sc_state_db::Error, - >, - )?; + let commit = + self.storage.state_db.canonicalize_block(&f_hash).map_err( + sp_blockchain::Error::from_state_db::>, + )?; apply_state_commit(transaction, commit); } @@ -1732,49 +1689,34 @@ impl Backend { &self, transaction: &mut Transaction, finalized: NumberFor, - displaced: &FinalizationOutcome>, + displaced: &FinalizationDisplaced>, ) -> ClientResult<()> { - match self.blocks_pruning { - BlocksPruning::KeepAll => {}, - BlocksPruning::Some(blocks_pruning) => { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized >= keep.into() { - let number = finalized.saturating_sub(keep.into()); - self.prune_block(transaction, BlockId::::number(number))?; - } - self.prune_displaced_branches(transaction, finalized, displaced)?; - }, - BlocksPruning::KeepFinalized => { - self.prune_displaced_branches(transaction, finalized, displaced)?; - }, - } - Ok(()) - } + if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { + // Always keep the last finalized block + let keep = std::cmp::max(keep_blocks, 1); + if finalized >= keep.into() { + let number = finalized.saturating_sub(keep.into()); + self.prune_block(transaction, BlockId::::number(number))?; + } - fn prune_displaced_branches( - &self, - transaction: &mut Transaction, - finalized: NumberFor, - displaced: &FinalizationOutcome>, - ) -> ClientResult<()> { - // Discard all blocks from displaced branches - for h in displaced.leaves() { - let mut number = finalized; - let mut hash = *h; - // Follow displaced chains back until we reach a finalized block. - // Since leaves are discarded due to finality, they can't have parents - // that are canonical, but not yet finalized. So we stop deleting as soon as - // we reach canonical chain. - while self.blockchain.hash(number)? 
!= Some(hash) { - let id = BlockId::::hash(hash); - match self.blockchain.header(id)? { - Some(header) => { - self.prune_block(transaction, id)?; - number = header.number().saturating_sub(One::one()); - hash = *header.parent_hash(); - }, - None => break, + // Also discard all blocks from displaced branches + for h in displaced.leaves() { + let mut number = finalized; + let mut hash = *h; + // Follow displaced chains back until we reach a finalized block. + // Since leaves are discarded due to finality, they can't have parents + // that are canonical, but not yet finalized. So we stop deleting as soon as + // we reach canonical chain. + while self.blockchain.hash(number)? != Some(hash) { + let id = BlockId::::hash(hash); + match self.blockchain.header(id)? { + Some(header) => { + self.prune_block(transaction, id)?; + number = header.number().saturating_sub(One::one()); + hash = *header.parent_hash(); + }, + None => break, + } } } } @@ -1794,13 +1736,6 @@ impl Backend { columns::BODY, id, )?; - utils::remove_from_db( - transaction, - &*self.storage.db, - columns::KEY_LOOKUP, - columns::JUSTIFICATIONS, - id, - )?; if let Some(index) = read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? 
{ @@ -1828,13 +1763,17 @@ impl Backend { Ok(()) } - fn empty_state(&self) -> ClientResult, Block>> { + fn empty_state(&self) -> ClientResult, Block>> { let root = EmptyStorage::::new().0; // Empty trie - let db_state = DbStateBuilder::::new(self.storage.clone(), root) - .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) - .build(); + let db_state = DbState::::new(self.storage.clone(), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - Ok(RecordStatsState::new(state, None, self.state_usage.clone())) + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); + Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )) } } @@ -1956,13 +1895,16 @@ where impl sc_client_api::backend::Backend for Backend { type BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; - type State = RecordStatsState, Block>; + type State = SyncingCachingState, Block>; type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { + let mut old_state = self.empty_state()?; + old_state.disable_syncing(); + Ok(BlockImportOperation { pending_block: None, - old_state: self.empty_state()?, + old_state, db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), @@ -1978,13 +1920,14 @@ impl sc_client_api::backend::Backend for Backend { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: Block::Hash, + block: BlockId, ) -> ClientResult<()> { - if block == Default::default() { + if block.is_pre_genesis() { operation.old_state = self.empty_state()?; } else { operation.old_state = self.state_at(block)?; } + operation.old_state.disable_syncing(); operation.commit_state = true; Ok(()) @@ -1994,30 +1937,31 @@ impl sc_client_api::backend::Backend for Backend { let usage = operation.old_state.usage_info(); 
self.state_usage.merge_sm(usage); - if let Err(e) = self.try_commit_operation(operation) { - let state_meta_db = StateMetaDb(self.storage.db.clone()); - self.storage - .state_db - .reset(state_meta_db) - .map_err(sp_blockchain::Error::from_state_db)?; - Err(e) - } else { - Ok(()) + match self.try_commit_operation(operation) { + Ok(_) => { + self.storage.state_db.apply_pending(); + Ok(()) + }, + e @ Err(_) => { + self.storage.state_db.revert_pending(); + e + }, } } fn finalize_block( &self, - hash: Block::Hash, + block: BlockId, justification: Option, ) -> ClientResult<()> { let mut transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(hash))?; + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + let header = self.blockchain.expect_header(block)?; let mut displaced = None; let m = self.finalize_block_with_transaction( &mut transaction, - hash, + &hash, &header, None, justification, @@ -2030,11 +1974,12 @@ impl sc_client_api::backend::Backend for Backend { fn append_justification( &self, - hash: Block::Hash, + block: BlockId, justification: Justification, ) -> ClientResult<()> { let mut transaction: Transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(hash))?; + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + let header = self.blockchain.expect_header(block)?; let number = *header.number(); // Check if the block is finalized first. @@ -2049,7 +1994,7 @@ impl sc_client_api::backend::Backend for Backend { } let justifications = if let Some(mut stored_justifications) = - self.blockchain.justifications(hash)? + self.blockchain.justifications(block)? 
{ if !stored_justifications.append(justification) { return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) @@ -2083,9 +2028,8 @@ impl sc_client_api::backend::Backend for Backend { ) }); let database_cache = MemorySize::from_bytes(0); - let state_cache = MemorySize::from_bytes( - self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()), - ); + let state_cache = + MemorySize::from_bytes(self.shared_cache.read().used_storage_cache_size()); let state_db = self.storage.state_db.memory_info(); Some(UsageInfo { @@ -2113,48 +2057,38 @@ impl sc_client_api::backend::Backend for Backend { ) -> ClientResult<(NumberFor, HashSet)> { let mut reverted_finalized = HashSet::new(); - let info = self.blockchain.info(); + let mut best_number = self.blockchain.info().best_number; + let mut best_hash = self.blockchain.info().best_hash; - let highest_leaf = self - .blockchain - .leaves - .read() - .highest_leaf() - .and_then(|(n, h)| h.last().map(|h| (n, *h))); - - let best_number = info.best_number; - let best_hash = info.best_hash; - - let finalized = info.finalized_number; + let finalized = self.blockchain.info().finalized_number; let revertible = best_number - finalized; let n = if !revert_finalized && revertible < n { revertible } else { n }; - let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf { - Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h), - None => (n, best_number, best_hash), - }; - let mut revert_blocks = || -> ClientResult> { for c in 0..n.saturated_into::() { - if number_to_revert.is_zero() { + if best_number.is_zero() { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed = - self.blockchain.header(BlockId::Hash(hash_to_revert))?.ok_or_else(|| { + self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block header not found.", - hash_to_revert, + "Error reverting to {}. 
Block hash not found.", + best_number )) })?; let removed_hash = removed.hash(); - let prev_number = number_to_revert.saturating_sub(One::one()); - let prev_hash = - if prev_number == best_number { best_hash } else { *removed.parent_hash() }; + let prev_number = best_number.saturating_sub(One::one()); + let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best_number + )) + })?; - if !self.have_state_at(prev_hash, prev_number) { + if !self.have_state_at(&prev_hash, prev_number) { return Ok(c.saturated_into::>()) } @@ -2162,15 +2096,12 @@ impl sc_client_api::backend::Backend for Backend { Some(commit) => { apply_state_commit(&mut transaction, commit); - number_to_revert = prev_number; - hash_to_revert = prev_hash; + best_number = prev_number; + best_hash = prev_hash; - let update_finalized = number_to_revert < finalized; + let update_finalized = best_number < finalized; - let key = utils::number_and_hash_to_lookup_key( - number_to_revert, - &hash_to_revert, - )?; + let key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?; if update_finalized { transaction.set_from_vec( columns::META, @@ -2180,14 +2111,12 @@ impl sc_client_api::backend::Backend for Backend { reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { - if hash == hash_to_revert { - if !number_to_revert.is_zero() && - self.have_state_at( - prev_hash, - number_to_revert - One::one(), - ) { + if hash == best_hash { + if !best_number.is_zero() && + self.have_state_at(&prev_hash, best_number - One::one()) + { let lookup_key = utils::number_and_hash_to_lookup_key( - number_to_revert - One::one(), + best_number - One::one(), prev_hash, )?; transaction.set_from_vec( @@ -2208,16 +2137,13 @@ impl sc_client_api::backend::Backend for Backend { &mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, - hash_to_revert, + best_hash, ); 
self.storage.db.commit(transaction)?; - - let is_best = number_to_revert < best_number; - self.blockchain.update_meta(MetaUpdate { - hash: hash_to_revert, - number: number_to_revert, - is_best, + hash: best_hash, + number: best_number, + is_best: true, is_finalized: update_finalized, with_state: false, }); @@ -2235,7 +2161,7 @@ impl sc_client_api::backend::Backend for Backend { let mut transaction = Transaction::new(); let mut leaves = self.blockchain.leaves.write(); - leaves.revert(hash_to_revert, number_to_revert); + leaves.revert(best_hash, best_number); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); self.storage.db.commit(transaction)?; @@ -2247,14 +2173,14 @@ impl sc_client_api::backend::Backend for Backend { Ok((reverted, reverted_finalized)) } - fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> { + fn remove_leaf_block(&self, hash: &Block::Hash) -> ClientResult<()> { let best_hash = self.blockchain.info().best_hash; - if best_hash == hash { + if best_hash == *hash { return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) } - let hdr = self.blockchain.header_metadata(hash)?; + let hdr = self.blockchain.header_metadata(*hash)?; if !self.have_state_at(hash, hdr.number) { return Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2263,7 +2189,7 @@ impl sc_client_api::backend::Backend for Backend { } let mut leaves = self.blockchain.leaves.write(); - if !leaves.contains(hdr.number, hash) { + if !leaves.contains(hdr.number, *hash) { return Err(sp_blockchain::Error::Backend(format!( "Can't remove non-leaf block {:?}", hash @@ -2271,45 +2197,14 @@ impl sc_client_api::backend::Backend for Backend { } let mut transaction = Transaction::new(); - if let Some(commit) = self.storage.state_db.remove(&hash) { + if let Some(commit) = self.storage.state_db.remove(hash) { apply_state_commit(&mut transaction, commit); } 
transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - - let children: Vec<_> = self - .blockchain() - .children(hdr.parent)? - .into_iter() - .filter(|child_hash| *child_hash != hash) - .collect(); - let parent_leaf = if children.is_empty() { - children::remove_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - hdr.parent, - ); - Some(hdr.parent) - } else { - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - hdr.parent, - children, - ); - None - }; - - let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf); + leaves.revert(*hash, hdr.number); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - if let Err(e) = self.storage.db.commit(transaction) { - if let Some(outcome) = remove_outcome { - leaves.undo().undo_remove(outcome); - } - return Err(e.into()) - } - self.blockchain().remove_header_metadata(hash); + self.storage.db.commit(transaction)?; + self.blockchain().remove_header_metadata(*hash); Ok(()) } @@ -2317,41 +2212,61 @@ impl sc_client_api::backend::Backend for Backend { &self.blockchain } - fn state_at(&self, hash: Block::Hash) -> ClientResult { - if hash == self.blockchain.meta.read().genesis_hash { - if let Some(genesis_state) = &*self.genesis_state.read() { - let root = genesis_state.root; - let db_state = DbStateBuilder::::new(genesis_state.clone(), root) - .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) - .build(); + fn state_at(&self, block: BlockId) -> ClientResult { + use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; + let is_genesis = match &block { + BlockId::Number(n) if n.is_zero() => true, + BlockId::Hash(h) if h == &self.blockchain.meta.read().genesis_hash => true, + _ => false, + }; + if is_genesis { + if let Some(genesis_state) = &*self.genesis_state.read() { + let db_state = DbState::::new(genesis_state.clone(), genesis_state.root); let state = RefTrackingState::new(db_state, 
self.storage.clone(), None); - return Ok(RecordStatsState::new(state, None, self.state_usage.clone())) + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); + let mut state = SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + ); + state.disable_syncing(); + return Ok(state) } } + let hash = match block { + BlockId::Hash(h) => h, + BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Unknown block number {}", n)) + })?, + }; + match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { - let hint = || { - sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) - .unwrap_or(None) - .is_some() - }; - if let Ok(()) = - self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) - { + if !self.have_state_at(&hash, hdr.number) { + return Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))) + } + if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root; - let db_state = DbStateBuilder::::new(self.storage.clone(), root) - .with_optional_cache( - self.shared_trie_cache.as_ref().map(|c| c.local_cache()), - ) - .build(); + let db_state = DbState::::new(self.storage.clone(), root); let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); - Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) + let caching_state = + CachingState::new(state, self.shared_cache.clone(), Some(hash)); + Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", - hash + block ))) } }, @@ -2359,9 +2274,9 @@ impl sc_client_api::backend::Backend for Backend { } } - fn have_state_at(&self, hash: Block::Hash, number: NumberFor) -> bool { 
+ fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { if self.is_archive { - match self.blockchain.header_metadata(hash) { + match self.blockchain.header_metadata(*hash) { Ok(header) => sp_state_machine::Storage::get( self.storage.as_ref(), &header.state_root, @@ -2372,20 +2287,7 @@ impl sc_client_api::backend::Backend for Backend { _ => false, } } else { - match self.storage.state_db.is_pruned(&hash, number.saturated_into::()) { - IsPruned::Pruned => false, - IsPruned::NotPruned => true, - IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) { - Ok(header) => sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ) - .unwrap_or(None) - .is_some(), - _ => false, - }, - } + !self.storage.state_db.is_pruned(hash, number.saturated_into::()) } } @@ -2457,9 +2359,13 @@ pub(crate) mod tests { }; let header_hash = header.hash(); - let block_hash = if number == 0 { Default::default() } else { parent_hash }; + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_hash).unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); @@ -2500,17 +2406,21 @@ pub(crate) mod tests { assert!(db.blockchain().hash(i).unwrap().is_none()); { - let hash = if i == 0 { - Default::default() + let id = if i == 0 { + BlockId::Hash(Default::default()) } else { - db.blockchain.hash(i - 1).unwrap().unwrap() + BlockId::Number(i - 1) }; let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, hash).unwrap(); + db.begin_state_operation(&mut op, id).unwrap(); let header = Header { number: i, - parent_hash: hash, + parent_hash: if i == 0 { + Default::default() + } else { + 
db.blockchain.hash(i - 1).unwrap().unwrap() + }, state_root: Default::default(), digest: Default::default(), extrinsics_root: Default::default(), @@ -2528,10 +2438,11 @@ pub(crate) mod tests { let backend = Backend::::new( DatabaseSettings { - trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(PruningMode::blocks_pruning(1)), + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + state_pruning: Some(PruningMode::keep_blocks(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - blocks_pruning: BlocksPruning::KeepFinalized, + keep_blocks: KeepBlocks::All, }, 0, ) @@ -2581,7 +2492,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(hash).unwrap(); + let state = db.state_at(BlockId::Number(0)).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2592,7 +2503,7 @@ pub(crate) mod tests { { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, hash).unwrap(); + db.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2611,12 +2522,12 @@ pub(crate) mod tests { header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) .unwrap(); db.commit_operation(op).unwrap(); - let state = db.state_at(header.hash()).unwrap(); + let state = db.state_at(BlockId::Number(1)).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2633,7 +2544,9 @@ pub(crate) mod tests { let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, Default::default()).unwrap(); + backend + .begin_state_operation(&mut 
op, BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -2668,9 +2581,9 @@ pub(crate) mod tests { hash }; - let hashof1 = { + let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hash).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2705,12 +2618,12 @@ pub(crate) mod tests { hash }; - let hashof2 = { + let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hashof1).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); let mut header = Header { number: 2, - parent_hash: hashof1, + parent_hash: hash, state_root: Default::default(), digest: Default::default(), extrinsics_root: Default::default(), @@ -2739,12 +2652,12 @@ pub(crate) mod tests { hash }; - let hashof3 = { + { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hashof2).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap(); let mut header = Header { number: 3, - parent_hash: hashof2, + parent_hash: hash, state_root: Default::default(), digest: Default::default(), extrinsics_root: Default::default(), @@ -2757,7 +2670,6 @@ pub(crate) mod tests { .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) .0 .into(); - let hash = header.hash(); op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -2768,12 +2680,11 @@ pub(crate) mod tests { .db .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) .is_none()); - hash - }; + } - backend.finalize_block(hashof1, None).unwrap(); - backend.finalize_block(hashof2, None).unwrap(); - backend.finalize_block(hashof3, None).unwrap(); + backend.finalize_block(BlockId::Number(1), None).unwrap(); + backend.finalize_block(BlockId::Number(2), None).unwrap(); + 
backend.finalize_block(BlockId::Number(3), None).unwrap(); assert!(backend .storage .db @@ -2996,8 +2907,8 @@ pub(crate) mod tests { vec![block2_a, block2_b, block2_c, block1_c] ); - backend.finalize_block(block1_a, None).unwrap(); - backend.finalize_block(block2_a, None).unwrap(); + backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); + backend.finalize_block(BlockId::hash(block2_a), None).unwrap(); // leaves at same height stay. Leaves at lower heights pruned. assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); @@ -3021,13 +2932,13 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); - backend.finalize_block(block1, justification.clone()).unwrap(); + backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); assert_eq!( - backend.blockchain().justifications(block1).unwrap(), + backend.blockchain().justifications(BlockId::Number(1)).unwrap(), justification.map(Justifications::from), ); } @@ -3039,17 +2950,17 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); - backend.finalize_block(block1, Some(just0.clone().into())).unwrap(); + backend.finalize_block(BlockId::Number(1), Some(just0.clone().into())).unwrap(); let just1 = (CONS1_ENGINE_ID, vec![4, 5]); - backend.append_justification(block1, just1.clone()).unwrap(); + backend.append_justification(BlockId::Number(1), just1.clone()).unwrap(); let just2 = 
(CONS1_ENGINE_ID, vec![6, 7]); assert!(matches!( - backend.append_justification(block1, just2), + backend.append_justification(BlockId::Number(1), just2), Err(ClientError::BadJustification(_)) )); @@ -3058,7 +2969,10 @@ pub(crate) mod tests { just.append(just1); just }; - assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),); + assert_eq!( + backend.blockchain().justifications(BlockId::Number(1)).unwrap(), + Some(justifications), + ); } #[test] @@ -3072,16 +2986,16 @@ pub(crate) mod tests { let block4 = insert_header(&backend, 4, block3, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block0).unwrap(); - op.mark_finalized(block1, None).unwrap(); - op.mark_finalized(block2, None).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); + op.mark_finalized(BlockId::Hash(block1), None).unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); backend.commit_operation(op).unwrap(); } { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block2).unwrap(); - op.mark_finalized(block3, None).unwrap(); - op.mark_finalized(block4, None).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + op.mark_finalized(BlockId::Hash(block3), None).unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); backend.commit_operation(op).unwrap(); } } @@ -3093,7 +3007,9 @@ pub(crate) mod tests { let hash0 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, Default::default()).unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -3127,11 +3043,15 @@ pub(crate) mod tests { hash }; - let block0_hash = backend.state_at(hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); + let block0_hash = backend + 
.state_at(BlockId::Hash(hash0)) + .unwrap() + .storage_hash(&b"test"[..]) + .unwrap(); let hash1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, hash0).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); let mut header = Header { number: 1, parent_hash: hash0, @@ -3166,7 +3086,11 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - let block1_hash = backend.state_at(hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); + let block1_hash = backend + .state_at(BlockId::Hash(hash1)) + .unwrap() + .storage_hash(&b"test"[..]) + .unwrap(); assert_ne!(block0_hash, block1_hash); } @@ -3180,15 +3104,15 @@ pub(crate) mod tests { let block2 = insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block0).unwrap(); - op.mark_finalized(block2, None).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); backend.commit_operation(op).unwrap_err(); } } #[test] fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); + let backend = Backend::::new_test_with_tx_storage(2, 0); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3208,128 +3132,23 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); for i in 1..5 { - op.mark_finalized(blocks[i], None).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); } backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(blocks[0]).unwrap()); - assert_eq!(None, bc.body(blocks[1]).unwrap()); - assert_eq!(None, bc.body(blocks[2]).unwrap()); - 
assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - } - - #[test] - fn prune_blocks_on_finalize_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - for i in 1..3 { - op.mark_finalized(blocks[i], None).unwrap(); - } - backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - } - - #[test] - fn prune_blocks_on_finalize_with_fork_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - // insert a fork at block 2 - let fork_hash_root = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![2.into()], - None, - ) - .unwrap(); - insert_block( - &backend, - 3, - fork_hash_root, - None, - H256::random(), - vec![3.into(), 11.into()], - None, - ) - .unwrap(); - - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_head(blocks[4]).unwrap(); 
- backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - - for i in 1..5 { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[i]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); - backend.commit_operation(op).unwrap(); - } - - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - assert_eq!(bc.info().best_number, 4); - for i in 0..5 { - assert!(bc.hash(i).unwrap().is_some()); - } + assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); } #[test] fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); + let backend = Backend::::new_test_with_tx_storage(2, 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3369,28 +3188,28 @@ pub(crate) mod tests { ) .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_head(blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(BlockId::Hash(blocks[4])).unwrap(); backend.commit_operation(op).unwrap(); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, 
blocks[4]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(blocks[0]).unwrap()); - assert_eq!(None, bc.body(blocks[1]).unwrap()); - assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); } #[test] fn indexed_data_block_body() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); + let backend = Backend::::new_test_with_tx_storage(1, 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3419,22 +3238,20 @@ pub(crate) mod tests { ) .unwrap(); let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]); - assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]); + assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[1..]); + assert_eq!(bc.indexed_transaction(&x1_hash).unwrap().unwrap(), &x1[1..]); - let hashof0 = bc.info().genesis_hash; // Push one more blocks and make sure block is pruned and transaction index is cleared. 
- let block1 = - insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); - backend.finalize_block(block1, None).unwrap(); - assert_eq!(bc.body(hashof0).unwrap(), None); - assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None); - assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); + insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); + backend.finalize_block(BlockId::Number(1), None).unwrap(); + assert_eq!(bc.body(BlockId::Number(0)).unwrap(), None); + assert_eq!(bc.indexed_transaction(&x0_hash).unwrap(), None); + assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); } #[test] fn index_invalid_size() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); + let backend = Backend::::new_test_with_tx_storage(1, 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3463,13 +3280,13 @@ pub(crate) mod tests { ) .unwrap(); let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]); - assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); + assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[..]); + assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); } #[test] fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); + let backend = Backend::::new_test_with_tx_storage(2, 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); @@ -3502,21 +3319,21 @@ pub(crate) mod tests { for i in 1..10 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); backend.commit_operation(op).unwrap(); let bc = 
backend.blockchain(); if i < 6 { - assert!(bc.indexed_transaction(x1_hash).unwrap().is_some()); + assert!(bc.indexed_transaction(&x1_hash).unwrap().is_some()); } else { - assert!(bc.indexed_transaction(x1_hash).unwrap().is_none()); + assert!(bc.indexed_transaction(&x1_hash).unwrap().is_none()); } } } #[test] fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); + let backend = Backend::::new_test_with_tx_storage(2, 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..2 { @@ -3534,21 +3351,7 @@ pub(crate) mod tests { prev_hash = hash; } - for i in 0..2 { - let hash = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - } - - // insert a fork at block 1, which becomes best block + // insert a fork at block 2, which becomes best block let best_hash = insert_block( &backend, 1, @@ -3559,36 +3362,11 @@ pub(crate) mod tests { None, ) .unwrap(); - - assert_eq!(backend.blockchain().info().best_hash, best_hash); - assert!(backend.remove_leaf_block(best_hash).is_err()); - - assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]); - assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); - - assert!(backend.have_state_at(blocks[3], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); - backend.remove_leaf_block(blocks[3]).unwrap(); - assert!(!backend.have_state_at(blocks[3], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); - assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); - - assert!(backend.have_state_at(blocks[2], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); - 
backend.remove_leaf_block(blocks[2]).unwrap(); - assert!(!backend.have_state_at(blocks[2], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); - assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); - - assert!(backend.have_state_at(blocks[1], 1)); - assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); - backend.remove_leaf_block(blocks[1]).unwrap(); - assert!(!backend.have_state_at(blocks[1], 1)); - assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); - assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); + assert!(backend.remove_leaf_block(&best_hash).is_err()); + assert!(backend.have_state_at(&prev_hash, 1)); + backend.remove_leaf_block(&prev_hash).unwrap(); + assert_eq!(None, backend.blockchain().header(BlockId::hash(prev_hash.clone())).unwrap()); + assert!(!backend.have_state_at(&prev_hash, 1)); } #[test] @@ -3678,119 +3456,11 @@ pub(crate) mod tests { let block1_a = insert_header(&backend, 1, block0, None, Default::default()); let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - backend.finalize_block(block1_a, None).unwrap(); + backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); // Insert a fork prior to finalization point. Leave should not be created. 
insert_header_no_head(&backend, 1, block0, [1; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); } - - #[test] - fn revert_non_best_blocks() { - let backend = Backend::::new_test(10, 10); - - let genesis = - insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None) - .unwrap(); - - let block1 = - insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap(); - - let block2 = - insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap(); - - let block3 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block1).unwrap(); - let header = Header { - number: 3, - parent_hash: block2, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - header.hash() - }; - - let block4 = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block2).unwrap(); - let header = Header { - number: 4, - parent_hash: block3, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) - .unwrap(); - - backend.commit_operation(op).unwrap(); - - header.hash() - }; - - let block3_fork = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block2).unwrap(); - let header = Header { - number: 3, - parent_hash: block2, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: H256::from_low_u64_le(42), - }; - - op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) - .unwrap(); - - 
backend.commit_operation(op).unwrap(); - - header.hash() - }; - - assert!(backend.have_state_at(block1, 1)); - assert!(backend.have_state_at(block2, 2)); - assert!(backend.have_state_at(block3, 3)); - assert!(backend.have_state_at(block4, 4)); - assert!(backend.have_state_at(block3_fork, 3)); - - assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); - assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); - - assert_eq!(3, backend.revert(1, false).unwrap().0); - - assert!(backend.have_state_at(block1, 1)); - assert!(!backend.have_state_at(block2, 2)); - assert!(!backend.have_state_at(block3, 3)); - assert!(!backend.have_state_at(block4, 4)); - assert!(!backend.have_state_at(block3_fork, 3)); - - assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); - assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); - } - - #[test] - fn test_no_duplicated_leaves_allowed() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - // Add block 2 not as the best block - let block2 = insert_header_no_head(&backend, 2, block1, Default::default()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); - assert_eq!(backend.blockchain().info().best_hash, block1); - - // Add block 2 as the best block - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); - assert_eq!(backend.blockchain().info().best_hash, block2); - } } diff --git a/client/db/src/record_stats_state.rs b/client/db/src/record_stats_state.rs deleted file mode 100644 index 0b51d3fef2127..0000000000000 --- a/client/db/src/record_stats_state.rs +++ /dev/null @@ -1,230 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Provides [`RecordStatsState`] for recording stats about state access. - -use crate::stats::StateUsageStats; -use sp_core::storage::ChildInfo; -use sp_runtime::{ - traits::{Block as BlockT, HashFor}, - StateVersion, -}; -use sp_state_machine::{ - backend::{AsTrieBackend, Backend as StateBackend}, - TrieBackend, -}; -use std::sync::Arc; - -/// State abstraction for recording stats about state access. -pub struct RecordStatsState { - /// Usage statistics - usage: StateUsageStats, - /// State machine registered stats - overlay_stats: sp_state_machine::StateMachineStats, - /// Backing state. - state: S, - /// The hash of the block is state belongs to. - block_hash: Option, - /// The usage statistics of the backend. These will be updated on drop. - state_usage: Arc, -} - -impl std::fmt::Debug for RecordStatsState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Block {:?}", self.block_hash) - } -} - -impl Drop for RecordStatsState { - fn drop(&mut self) { - self.state_usage.merge_sm(self.usage.take()); - } -} - -impl>, B: BlockT> RecordStatsState { - /// Create a new instance wrapping generic State and shared cache. 
- pub(crate) fn new( - state: S, - block_hash: Option, - state_usage: Arc, - ) -> Self { - RecordStatsState { - usage: StateUsageStats::new(), - overlay_stats: sp_state_machine::StateMachineStats::default(), - state, - block_hash, - state_usage, - } - } -} - -impl>, B: BlockT> StateBackend> for RecordStatsState { - type Error = S::Error; - type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - let value = self.state.storage(key)?; - self.usage.tally_key_read(key, value.as_ref(), false); - Ok(value) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.state.storage_hash(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - let key = (child_info.storage_key().to_vec(), key.to_vec()); - let value = self.state.child_storage(child_info, &key.1)?; - - // just pass it through the usage counter - let value = self.usage.tally_child_key_read(&key, value, false); - - Ok(value) - } - - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.state.child_storage_hash(child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.state.exists_storage(key) - } - - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result { - self.state.exists_child_storage(child_info, key) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.state - .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.state.apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn next_storage_key(&self, key: &[u8]) 
-> Result>, Self::Error> { - self.state.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.state.for_child_keys_with_prefix(child_info, prefix, f) - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, Self::Transaction) - where - B::Hash: Ord, - { - self.state.storage_root(delta, state_version) - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (B::Hash, bool, Self::Transaction) - where - B::Hash: Ord, - { - self.state.child_storage_root(child_info, delta, state_version) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.state.child_keys(child_info, prefix) - } - - fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { - self.overlay_stats.add(stats); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - let mut info = self.usage.take(); - info.include_state_machine_states(&self.overlay_stats); - info - } -} - -impl> + AsTrieBackend>, B: BlockT> AsTrieBackend> - for RecordStatsState -{ - type TrieBackendStorage = >>::TrieBackendStorage; - - fn as_trie_backend(&self) -> &TrieBackend> { - self.state.as_trie_backend() - } -} diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index d9253fe09eb50..8326946999946 100644 --- 
a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1223,7 +1223,7 @@ mod tests { let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), - Some(root_parent), + Some(root_parent.clone()), ); let key = H256::random()[..].to_vec(); @@ -1307,14 +1307,14 @@ mod tests { let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), - Some(root_parent), + Some(root_parent.clone()), ); s.cache.sync_cache( &[], &[], vec![(key.clone(), Some(vec![2]))], vec![], - Some(h0), + Some(h0.clone()), Some(0), true, ); diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 51750bf689759..292905663a20b 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -115,7 +115,7 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> Upgra /// 2) transactions column is added; fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); - let mut db = Database::open(&db_cfg, db_path)?; + let db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) } @@ -126,10 +126,7 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr let db = Database::open(&db_cfg, db_path)?; // Get all the keys we need to update - let keys: Vec<_> = db - .iter(columns::JUSTIFICATIONS) - .map(|r| r.map(|e| e.0)) - .collect::>()?; + let keys: Vec<_> = db.iter(columns::JUSTIFICATIONS).map(|entry| entry.0).collect(); // Read and update each entry let mut transaction = db.transaction(); @@ -155,7 +152,7 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr /// 2) BODY_INDEX column is added; fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V3_NUM_COLUMNS); - let mut db = Database::open(&db_cfg, db_path)?; + let db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) } diff --git a/client/executor/Cargo.toml 
b/client/executor/Cargo.toml index e48c27dfc998e..566ed0a50fc0f 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -15,10 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] lazy_static = "1.4.0" -lru = "0.8.1" -parking_lot = "0.12.1" +lru = "0.7.5" +parking_lot = "0.12.0" tracing = "0.1.29" -wasmi = "0.13" +wasmi = "0.9.1" codec = { package = "parity-scale-codec", version = "3.0.0" } sc-executor-common = { version = "0.10.0-dev", path = "common" } @@ -31,13 +31,14 @@ sp-externalities = { version = "0.12.0", path = "../../primitives/externalities" sp-io = { version = "6.0.0", path = "../../primitives/io" } sp-panic-handler = { version = "4.0.0", path = "../../primitives/panic-handler" } sp-runtime-interface = { version = "6.0.0", path = "../../primitives/runtime-interface" } +sp-tasks = { version = "4.0.0-dev", path = "../../primitives/tasks" } sp-trie = { version = "6.0.0", path = "../../primitives/trie" } sp-version = { version = "5.0.0", path = "../../primitives/version" } sp-wasm-interface = { version = "6.0.0", path = "../../primitives/wasm-interface" } [dev-dependencies] -array-bytes = "4.1" wat = "1.0" +hex-literal = "0.3.4" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } sp-state-machine = { version = "0.12.0", path = "../../primitives/state-machine" } @@ -46,7 +47,7 @@ sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/may sc-tracing = { version = "4.0.0-dev", path = "../tracing" } tracing-subscriber = "0.2.19" paste = "1.0" -regex = "1.6.0" +regex = "1.5.5" criterion = "0.3" env_logger = "0.9" num_cpus = "1.13.1" @@ -62,4 +63,5 @@ default = ["std"] std = [] wasm-extern-trace = [] wasmtime = ["sc-executor-wasmtime"] +wasmi-errno = ["wasmi/errno"] wasmer-sandbox = ["sc-executor-common/wasmer-sandbox"] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 
71a6f2c324591..9ffdfb788474d 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -17,12 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } environmental = "1.1.3" thiserror = "1.0.30" -wasm-instrument = "0.3" +wasm-instrument = "0.1" wasmer = { version = "2.2", features = ["singlepass"], optional = true } -wasmi = "0.13" +wasmi = "0.9.1" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" } sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } +sp-serializer = { version = "4.0.0-dev", path = "../../../primitives/serializer" } sp-wasm-interface = { version = "6.0.0", path = "../../../primitives/wasm-interface" } [features] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 376ac190bd7b7..09e070bb3bae5 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -18,6 +18,7 @@ //! Rust executor possible errors. +use sp_serializer; use wasmi; /// Result type alias. @@ -27,6 +28,9 @@ pub type Result = std::result::Result; #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { + #[error("Unserializable data encountered")] + InvalidData(#[from] sp_serializer::Error), + #[error(transparent)] Wasmi(#[from] wasmi::Error), diff --git a/client/executor/common/src/sandbox/wasmi_backend.rs b/client/executor/common/src/sandbox/wasmi_backend.rs index 2ba133f5f15b1..03fa5dc06dea8 100644 --- a/client/executor/common/src/sandbox/wasmi_backend.rs +++ b/client/executor/common/src/sandbox/wasmi_backend.rs @@ -18,14 +18,14 @@ //! 
Wasmi specific impls for sandbox -use std::{fmt, rc::Rc}; +use std::rc::Rc; use codec::{Decode, Encode}; use sp_sandbox::HostError; use sp_wasm_interface::{FunctionContext, Pointer, ReturnValue, Value, WordSize}; use wasmi::{ memory_units::Pages, ImportResolver, MemoryInstance, Module, ModuleInstance, RuntimeArgs, - RuntimeValue, Trap, + RuntimeValue, Trap, TrapKind, }; use crate::{ @@ -39,20 +39,9 @@ use crate::{ environmental::environmental!(SandboxContextStore: trait SandboxContext); -#[derive(Debug)] -struct CustomHostError(String); - -impl fmt::Display for CustomHostError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "HostError: {}", self.0) - } -} - -impl wasmi::HostError for CustomHostError {} - /// Construct trap error from specified message fn trap(msg: &'static str) -> Trap { - Trap::host(CustomHostError(msg.into())) + TrapKind::Host(Box::new(Error::Other(msg.into()))).into() } impl ImportResolver for Imports { diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index f90b2e1439a77..7a7848700c4c1 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -19,6 +19,7 @@ sp-io = { version = "6.0.0", default-features = false, features = ["improved_pan sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } +sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } @@ -31,4 +32,5 @@ std = [ "sp-runtime/std", "sp-sandbox/std", "sp-std/std", + "sp-tasks/std", ] diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 
0424ad418617b..0c61d6fcd38a2 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -54,269 +54,287 @@ static mut MUTABLE_STATIC: u64 = 32; static mut MUTABLE_STATIC_BSS: u64 = 0; sp_core::wasm_export_functions! { - fn test_calling_missing_external() { - unsafe { missing_external() } - } + fn test_calling_missing_external() { + unsafe { missing_external() } + } - fn test_calling_yet_another_missing_external() { - unsafe { yet_another_missing_external() } - } + fn test_calling_yet_another_missing_external() { + unsafe { yet_another_missing_external() } + } - fn test_data_in(input: Vec) -> Vec { - print("set_storage"); - storage::set(b"input", &input); + fn test_data_in(input: Vec) -> Vec { + print("set_storage"); + storage::set(b"input", &input); - print("storage"); - let foo = storage::get(b"foo").unwrap(); + print("storage"); + let foo = storage::get(b"foo").unwrap(); - print("set_storage"); - storage::set(b"baz", &foo); + print("set_storage"); + storage::set(b"baz", &foo); - print("finished!"); - b"all ok!".to_vec() - } + print("finished!"); + b"all ok!".to_vec() + } - fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input, None); - b"all ok!".to_vec() - } + fn test_clear_prefix(input: Vec) -> Vec { + storage::clear_prefix(&input, None); + b"all ok!".to_vec() + } - fn test_empty_return() {} + fn test_empty_return() {} - fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { - // This piece of code will dirty multiple pages of memory. The number of pages is given by - // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared - // is a wasm page that that follows the one that holds the `heap_base` address. - // - // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take - // 16 writes to process a single wasm page. 
+ fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { + // This piece of code will dirty multiple pages of memory. The number of pages is given by + // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared + // is a wasm page that that follows the one that holds the `heap_base` address. + // + // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take + // 16 writes to process a single wasm page. - let heap_ptr = heap_base as usize; + let heap_ptr = heap_base as usize; - // Find the next wasm page boundary. - let heap_ptr = round_up_to(heap_ptr, 65536); + // Find the next wasm page boundary. + let heap_ptr = round_up_to(heap_ptr, 65536); - // Make it an actual pointer - let heap_ptr = heap_ptr as *mut u8; + // Make it an actual pointer + let heap_ptr = heap_ptr as *mut u8; - // Traverse the host pages and make each one dirty - let host_pages = heap_pages as usize * 16; - for i in 0..host_pages { - unsafe { - // technically this is an UB, but there is no way Rust can find this out. - heap_ptr.add(i * 4096).write(0); - } - } + // Traverse the host pages and make each one dirty + let host_pages = heap_pages as usize * 16; + for i in 0..host_pages { + unsafe { + // technically this is an UB, but there is no way Rust can find this out. 
+ heap_ptr.add(i * 4096).write(0); + } + } - fn round_up_to(n: usize, divisor: usize) -> usize { - (n + divisor - 1) / divisor - } - } + fn round_up_to(n: usize, divisor: usize) -> usize { + (n + divisor - 1) / divisor + } + } fn test_allocate_vec(size: u32) -> Vec { Vec::with_capacity(size as usize) } - fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { - let a = f32::from_le_bytes(a); - let b = f32::from_le_bytes(b); - f32::to_le_bytes(a + b) - } - - fn test_panic() { panic!("test panic") } - - fn test_conditional_panic(input: Vec) -> Vec { - if input.len() > 0 { - panic!("test panic") - } - - input - } - - fn test_blake2_256(input: Vec) -> Vec { - blake2_256(&input).to_vec() - } - - fn test_blake2_128(input: Vec) -> Vec { - blake2_128(&input).to_vec() - } - - fn test_sha2_256(input: Vec) -> Vec { - sha2_256(&input).to_vec() - } - - fn test_twox_256(input: Vec) -> Vec { - twox_256(&input).to_vec() - } - - fn test_twox_128(input: Vec) -> Vec { - twox_128(&input).to_vec() - } - - fn test_ed25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) - } - - fn test_sr25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) - } - - fn test_ordered_trie_root() -> Vec { - BlakeTwo256::ordered_trie_root( - vec![ - b"zero"[..].into(), - b"one"[..].into(), - b"two"[..].into(), - ], - sp_core::storage::StateVersion::V1, - ).as_ref().to_vec() - } - - fn test_offchain_index_set() { - sp_io::offchain_index::set(b"k", b"v"); - } - - fn test_offchain_local_storage() -> bool { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - 
assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - sp_io::offchain::local_storage_set(kind, b"test", b"asd"); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - b"test", - Some(b"asd".to_vec()), - b"", - ); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); - res - } - - fn test_offchain_local_storage_with_none() { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - - let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); - assert_eq!(res, true); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); - } - - fn test_offchain_http() -> bool { - use sp_core::offchain::HttpRequestStatus; - let run = || -> Option<()> { - let id = sp_io::offchain::http_request_start( - "POST", - "http://localhost:12345", - &[], - ).ok()?; - sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; - sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; - sp_io::offchain::http_request_write_body(id, &[], None).ok()?; - let status = sp_io::offchain::http_response_wait(&[id], None); - assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); - let headers = sp_io::offchain::http_response_headers(id); - assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); - let mut buffer = vec![0; 64]; - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 3); - assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 0); - - Some(()) - }; - - run().is_some() - } - - fn test_enter_span() -> u64 { - wasm_tracing::enter_span(Default::default()) - } - - fn test_exit_span(span_id: 
u64) { - wasm_tracing::exit(span_id) - } - - fn test_nested_spans() { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - wasm_tracing::exit(span_id); - } - wasm_tracing::exit(span_id); - } - - fn returns_mutable_static() -> u64 { - unsafe { - MUTABLE_STATIC += 1; - MUTABLE_STATIC - } - } - - fn returns_mutable_static_bss() -> u64 { - unsafe { - MUTABLE_STATIC_BSS += 1; - MUTABLE_STATIC_BSS - } - } - - fn allocates_huge_stack_array(trap: bool) -> Vec { - // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). - // This will just decrease (stacks in wasm32-u-u grow downwards) the stack - // pointer. This won't trap on the current compilers. - let mut data = [0u8; 1024 * 768]; - - // Then make sure we actually write something to it. - // - // If: - // 1. the stack area is placed at the beginning of the linear memory space, and - // 2. the stack pointer points to out-of-bounds area, and - // 3. a write is performed around the current stack pointer. - // - // then a trap should happen. - // - for (i, v) in data.iter_mut().enumerate() { - *v = i as u8; // deliberate truncation - } - - if trap { - // There is a small chance of this to be pulled up in theory. In practice - // the probability of that is rather low. - panic!() - } - - data.to_vec() - } - - // Check that the heap at `heap_base + offset` don't contains the test message. - // After the check succeeds the test message is written into the heap. - // - // It is expected that the given pointer is not allocated. 
- fn check_and_set_in_heap(heap_base: u32, offset: u32) { - let test_message = b"Hello invalid heap memory"; - let ptr = (heap_base + offset) as *mut u8; - - let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; - - assert_ne!(test_message, message_slice); - message_slice.copy_from_slice(test_message); - } + fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { + let a = f32::from_le_bytes(a); + let b = f32::from_le_bytes(b); + f32::to_le_bytes(a + b) + } + + fn test_panic() { panic!("test panic") } + + fn test_conditional_panic(input: Vec) -> Vec { + if input.len() > 0 { + panic!("test panic") + } + + input + } + + fn test_blake2_256(input: Vec) -> Vec { + blake2_256(&input).to_vec() + } + + fn test_blake2_128(input: Vec) -> Vec { + blake2_128(&input).to_vec() + } + + fn test_sha2_256(input: Vec) -> Vec { + sha2_256(&input).to_vec() + } + + fn test_twox_256(input: Vec) -> Vec { + twox_256(&input).to_vec() + } + + fn test_twox_128(input: Vec) -> Vec { + twox_128(&input).to_vec() + } + + fn test_ed25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) + } + + fn test_sr25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) + } + + fn test_ordered_trie_root() -> Vec { + BlakeTwo256::ordered_trie_root( + vec![ + b"zero"[..].into(), + b"one"[..].into(), + b"two"[..].into(), + ], + sp_core::storage::StateVersion::V1, + ).as_ref().to_vec() + } + + fn test_offchain_index_set() { + sp_io::offchain_index::set(b"k", b"v"); + } + + fn test_offchain_local_storage() -> bool { + let kind = 
sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + sp_io::offchain::local_storage_set(kind, b"test", b"asd"); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); + + let res = sp_io::offchain::local_storage_compare_and_set( + kind, + b"test", + Some(b"asd".to_vec()), + b"", + ); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); + res + } + + fn test_offchain_local_storage_with_none() { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + + let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); + assert_eq!(res, true); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); + } + + fn test_offchain_http() -> bool { + use sp_core::offchain::HttpRequestStatus; + let run = || -> Option<()> { + let id = sp_io::offchain::http_request_start( + "POST", + "http://localhost:12345", + &[], + ).ok()?; + sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; + sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; + sp_io::offchain::http_request_write_body(id, &[], None).ok()?; + let status = sp_io::offchain::http_response_wait(&[id], None); + assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); + let headers = sp_io::offchain::http_response_headers(id); + assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); + let mut buffer = vec![0; 64]; + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 3); + assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 0); + + Some(()) + }; + + run().is_some() + } + + fn test_enter_span() -> u64 { + 
wasm_tracing::enter_span(Default::default()) + } + + fn test_exit_span(span_id: u64) { + wasm_tracing::exit(span_id) + } + + fn test_nested_spans() { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + wasm_tracing::exit(span_id); + } + wasm_tracing::exit(span_id); + } + + fn returns_mutable_static() -> u64 { + unsafe { + MUTABLE_STATIC += 1; + MUTABLE_STATIC + } + } + + fn returns_mutable_static_bss() -> u64 { + unsafe { + MUTABLE_STATIC_BSS += 1; + MUTABLE_STATIC_BSS + } + } + + fn allocates_huge_stack_array(trap: bool) -> Vec { + // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). + // This will just decrease (stacks in wasm32-u-u grow downwards) the stack + // pointer. This won't trap on the current compilers. + let mut data = [0u8; 1024 * 768]; + + // Then make sure we actually write something to it. + // + // If: + // 1. the stack area is placed at the beginning of the linear memory space, and + // 2. the stack pointer points to out-of-bounds area, and + // 3. a write is performed around the current stack pointer. + // + // then a trap should happen. + // + for (i, v) in data.iter_mut().enumerate() { + *v = i as u8; // deliberate truncation + } + + if trap { + // There is a small chance of this to be pulled up in theory. In practice + // the probability of that is rather low. + panic!() + } + + data.to_vec() + } + + // Check that the heap at `heap_base + offset` don't contains the test message. + // After the check succeeds the test message is written into the heap. + // + // It is expected that the given pointer is not allocated. 
+ fn check_and_set_in_heap(heap_base: u32, offset: u32) { + let test_message = b"Hello invalid heap memory"; + let ptr = (heap_base + offset) as *mut u8; + + let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; + + assert_ne!(test_message, message_slice); + message_slice.copy_from_slice(test_message); + } + + fn test_spawn() { + let data = vec![1u8, 2u8]; + let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); + + assert_eq!(data_new, vec![2u8, 3u8]); + } + + fn test_nested_spawn() { + let data = vec![7u8, 13u8]; + let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); + + assert_eq!(data_new, vec![10u8, 16u8]); + } + + fn test_panic_in_spawned() { + sp_tasks::spawn(tasks::panicker, vec![]).join(); + } fn test_return_i8() -> i8 { -66 @@ -333,10 +351,24 @@ sp_core::wasm_export_functions! { fn test_unreachable_intrinsic() { core::arch::wasm32::unreachable() } +} + +#[cfg(not(feature = "std"))] +mod tasks { + use sp_std::prelude::*; + + pub fn incrementer(data: Vec) -> Vec { + data.into_iter().map(|v| v + 1).collect() + } + + pub fn panicker(_: Vec) -> Vec { + panic!() + } - fn test_return_value() -> u64 { - // Mainly a test that the macro is working when we have a return statement here. 
- return 1234; + pub fn parallel_incrementer(data: Vec) -> Vec { + let first = data.into_iter().map(|v| v + 2).collect::>(); + let second = sp_tasks::spawn(incrementer, first).join(); + second } } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 3217f9f96ca79..8ce0b56da2389 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -21,6 +21,7 @@ mod linux; mod sandbox; use codec::{Decode, Encode}; +use hex_literal::hex; use sc_executor_common::{error::Error, runtime_blob::RuntimeBlob, wasm_runtime::WasmModule}; use sc_runtime_test::wasm_binary_unwrap; use sp_core::{ @@ -252,7 +253,7 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_calling_missing_external", &[], wasm_method, &mut ext).unwrap_err() { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Other: Function `missing_external` is only a stub. Calling a stub is not allowed.", + WasmExecutionMethod::Interpreted => "Trap: Host(Other(\"Function `missing_external` is only a stub. Calling a stub is not allowed.\"))", #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled { .. } => "call to a missing function env:missing_external" }; @@ -272,7 +273,7 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Other: Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.", + WasmExecutionMethod::Interpreted => "Trap: Host(Other(\"Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.\"))", #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled { .. 
} => "call to a missing function env:yet_another_missing_external" }; @@ -390,18 +391,16 @@ fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); assert_eq!( call_in_wasm("test_sha2_256", &[0], wasm_method, &mut ext,).unwrap(), - array_bytes::hex2bytes_unchecked( - "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - ) - .encode(), + hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + .to_vec() + .encode(), ); assert_eq!( call_in_wasm("test_sha2_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) .unwrap(), - array_bytes::hex2bytes_unchecked( - "c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a" - ) - .encode(), + hex!("c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a") + .to_vec() + .encode(), ); } @@ -411,18 +410,16 @@ fn twox_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); assert_eq!( call_in_wasm("test_twox_256", &[0], wasm_method, &mut ext,).unwrap(), - array_bytes::hex2bytes_unchecked( - "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" - ) - .encode(), + hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") + .to_vec() + .encode(), ); assert_eq!( call_in_wasm("test_twox_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) .unwrap(), - array_bytes::hex2bytes_unchecked( - "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" - ) - .encode(), + hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") + .to_vec() + .encode(), ); } @@ -432,12 +429,12 @@ fn twox_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); assert_eq!( call_in_wasm("test_twox_128", &[0], wasm_method, &mut ext,).unwrap(), - array_bytes::hex2bytes_unchecked("99e9d85137db46ef4bbea33613baafd5").encode(), + hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), ); assert_eq!( call_in_wasm("test_twox_128", &b"Hello world!".to_vec().encode(), 
wasm_method, &mut ext,) .unwrap(), - array_bytes::hex2bytes_unchecked("b27dfd7f223f177f2a13647b533599af").encode(), + hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), ); } @@ -707,7 +704,7 @@ fn parallel_execution(wasm_method: WasmExecutionMethod) { &[0], ) .unwrap(), - array_bytes::hex2bytes_unchecked("99e9d85137db46ef4bbea33613baafd5").encode() + hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), ); }) }) @@ -770,6 +767,33 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(len, 2); } +test_wasm_execution!(spawning_runtime_instance_should_work); +fn spawning_runtime_instance_should_work(wasm_method: WasmExecutionMethod) { + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + call_in_wasm("test_spawn", &[], wasm_method, &mut ext).unwrap(); +} + +test_wasm_execution!(spawning_runtime_instance_nested_should_work); +fn spawning_runtime_instance_nested_should_work(wasm_method: WasmExecutionMethod) { + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + call_in_wasm("test_nested_spawn", &[], wasm_method, &mut ext).unwrap(); +} + +test_wasm_execution!(panic_in_spawned_instance_panics_on_joining_its_result); +fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecutionMethod) { + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + let error_result = + call_in_wasm("test_panic_in_spawned", &[], wasm_method, &mut ext).unwrap_err(); + + assert!(error_result.to_string().contains("Spawned task")); +} + test_wasm_execution!(memory_is_cleared_between_invocations); fn memory_is_cleared_between_invocations(wasm_method: WasmExecutionMethod) { // This is based on the code generated by compiling a runtime *without* @@ -885,7 +909,7 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_unreachable_intrinsic", &[], wasm_method, &mut ext).unwrap_err() { Error::AbortedDueToTrap(error) => { let expected = match 
wasm_method { - WasmExecutionMethod::Interpreted => "unreachable", + WasmExecutionMethod::Interpreted => "Trap: Unreachable", #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled { .. } => "wasm trap: wasm `unreachable` instruction executed", }; @@ -894,14 +918,3 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) { error => panic!("unexpected error: {:?}", error), } } - -test_wasm_execution!(return_value); -fn return_value(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - - assert_eq!( - call_in_wasm("test_return_value", &[], wasm_method, &mut ext).unwrap(), - (1234u64).encode() - ); -} diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 0eabffb8c87df..d805949d26fd9 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -23,18 +23,28 @@ use crate::{ }; use std::{ + collections::HashMap, marker::PhantomData, panic::{AssertUnwindSafe, UnwindSafe}, path::PathBuf, - sync::Arc, + result, + sync::{ + atomic::{AtomicU64, Ordering}, + mpsc, Arc, + }, }; -use codec::Encode; +use codec::{Decode, Encode}; use sc_executor_common::{ runtime_blob::RuntimeBlob, - wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, + wasm_runtime::{AllocationStats, InvokeMethod, WasmInstance, WasmModule}, }; -use sp_core::traits::{CodeExecutor, Externalities, RuntimeCode}; +use sp_core::{ + traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}, + NativeOrEncoded, +}; +use sp_externalities::ExternalitiesExt as _; +use sp_tasks::new_async_externalities; use sp_version::{GetNativeVersion, NativeVersion, RuntimeVersion}; use sp_wasm_interface::{ExtendedHostFunctions, HostFunctions}; @@ -91,8 +101,7 @@ pub struct WasmExecutor { /// The path to a directory which the executor can leverage for a file cache, e.g. put there /// compiled artifacts. cache_path: Option, - /// Ignore missing function imports. 
- allow_missing_host_functions: bool, + phantom: PhantomData, } @@ -103,7 +112,6 @@ impl Clone for WasmExecutor { default_heap_pages: self.default_heap_pages, cache: self.cache.clone(), cache_path: self.cache_path.clone(), - allow_missing_host_functions: self.allow_missing_host_functions, phantom: self.phantom, } } @@ -122,13 +130,14 @@ where /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. /// + /// `host_functions` - The set of host functions to be available for import provided by this + /// executor. + /// /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. /// /// `cache_path` - A path to a directory where the executor can place its files for purposes of /// caching. This may be important in cases when there are many different modules with the /// compiled execution method is used. - /// - /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( method: WasmExecutionMethod, default_heap_pages: Option, @@ -145,16 +154,10 @@ where runtime_cache_size, )), cache_path, - allow_missing_host_functions: false, phantom: PhantomData, } } - /// Ignore missing function imports if set true. - pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { - self.allow_missing_host_functions = allow_missing_host_functions - } - /// Execute the given closure `f` with the latest runtime (based on `runtime_code`). /// /// The closure `f` is expected to return `Err(_)` when there happened a `panic!` in native code @@ -168,10 +171,11 @@ where /// runtime is invalidated on any `panic!` to prevent a poisoned state. `ext` is already /// implicitly handled as unwind safe, as we store it in a global variable while executing the /// native runtime. 
- pub fn with_instance( + fn with_instance( &self, runtime_code: &RuntimeCode, ext: &mut dyn Externalities, + allow_missing_host_functions: bool, f: F, ) -> Result where @@ -187,7 +191,7 @@ where ext, self.method, self.default_heap_pages, - self.allow_missing_host_functions, + allow_missing_host_functions, |module, instance, version, ext| { let module = AssertUnwindSafe(module); let instance = AssertUnwindSafe(instance); @@ -205,7 +209,7 @@ where /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be instantiated with the /// parameters this `WasmExecutor` was initialized with. /// - /// In case of problems with during creation of the runtime or instantiation, a `Err` is + /// In case of problems with during creation of the runtime or instantation, a `Err` is /// returned. that describes the message. #[doc(hidden)] // We use this function for tests across multiple crates. pub fn uncached_call( @@ -271,9 +275,11 @@ where let mut instance = AssertUnwindSafe(instance); let mut ext = AssertUnwindSafe(ext); + let module = AssertUnwindSafe(module); let mut allocation_stats_out = AssertUnwindSafe(allocation_stats_out); with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); let (result, allocation_stats) = instance.call_with_allocation_stats(export_name.into(), call_data); **allocation_stats_out = allocation_stats; @@ -327,24 +333,35 @@ where { type Error = Error; - fn call( + fn call< + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + UnwindSafe, + >( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, method: &str, data: &[u8], _use_native: bool, - ) -> (Result>, bool) { + _native_call: Option, + ) -> (Result>, bool) { tracing::trace!( target: "executor", %method, "Executing function", ); - let result = - self.with_instance(runtime_code, ext, |_, mut instance, _onchain_version, mut ext| { - with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) - }); + let result = 
self.with_instance( + runtime_code, + ext, + false, + |module, mut instance, _onchain_version, mut ext| { + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(method, data).map(NativeOrEncoded::Encoded) + }) + }, + ); (result, false) } } @@ -358,7 +375,7 @@ where ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.with_instance(runtime_code, ext, |_module, _instance, version, _ext| { + self.with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) }) } @@ -371,7 +388,7 @@ where D: NativeExecutionDispatch, { /// Dummy field to avoid the compiler complaining about us not using `D`. - _dummy: PhantomData, + _dummy: std::marker::PhantomData, /// Native runtime version info. native_version: NativeVersion, /// Fallback wasm executor. @@ -388,17 +405,13 @@ impl NativeElseWasmExecutor { /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. - /// - /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. - /// - /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( fallback_method: WasmExecutionMethod, default_heap_pages: Option, max_runtime_instances: usize, runtime_cache_size: u8, ) -> Self { - let wasm = WasmExecutor::new( + let wasm_executor = WasmExecutor::new( fallback_method, default_heap_pages, max_runtime_instances, @@ -409,14 +422,9 @@ impl NativeElseWasmExecutor { NativeElseWasmExecutor { _dummy: Default::default(), native_version: D::native_version(), - wasm, + wasm: wasm_executor, } } - - /// Ignore missing function imports if set true. 
- pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { - self.wasm.allow_missing_host_functions = allow_missing_host_functions - } } impl RuntimeVersionOf for NativeElseWasmExecutor { @@ -425,9 +433,10 @@ impl RuntimeVersionOf for NativeElseWasmExecutor ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.wasm.with_instance(runtime_code, ext, |_module, _instance, version, _ext| { - Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) - }) + self.wasm + .with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) } } @@ -437,17 +446,153 @@ impl GetNativeVersion for NativeElseWasmExecutor } } +/// Helper inner struct to implement `RuntimeSpawn` extension. +pub struct RuntimeInstanceSpawn { + module: Arc, + tasks: parking_lot::Mutex>>>, + counter: AtomicU64, + scheduler: Box, +} + +impl RuntimeSpawn for RuntimeInstanceSpawn { + fn spawn_call(&self, dispatcher_ref: u32, func: u32, data: Vec) -> u64 { + let new_handle = self.counter.fetch_add(1, Ordering::Relaxed); + + let (sender, receiver) = mpsc::channel(); + self.tasks.lock().insert(new_handle, receiver); + + let module = self.module.clone(); + let scheduler = self.scheduler.clone(); + self.scheduler.spawn( + "executor-extra-runtime-instance", + None, + Box::pin(async move { + let module = AssertUnwindSafe(module); + + let async_ext = match new_async_externalities(scheduler.clone()) { + Ok(val) => val, + Err(e) => { + tracing::error!( + target: "executor", + error = %e, + "Failed to setup externalities for async context.", + ); + + // This will drop sender and receiver end will panic + return + }, + }; + + let mut async_ext = match async_ext.with_runtime_spawn(Box::new( + RuntimeInstanceSpawn::new(module.clone(), scheduler), + )) { + Ok(val) => val, + Err(e) => { + tracing::error!( + target: "executor", + error = %e, + "Failed to 
setup runtime extension for async externalities", + ); + + // This will drop sender and receiver end will panic + return + }, + }; + + let result = with_externalities_safe(&mut async_ext, move || { + // FIXME: Should be refactored to shared "instance factory". + // Instantiating wasm here every time is suboptimal at the moment, shared + // pool of instances should be used. + // + // https://github.com/paritytech/substrate/issues/7354 + let mut instance = match module.new_instance() { + Ok(instance) => instance, + Err(error) => { + panic!("failed to create new instance from module: {}", error) + }, + }; + + match instance + .call(InvokeMethod::TableWithWrapper { dispatcher_ref, func }, &data[..]) + { + Ok(result) => result, + Err(error) => panic!("failed to invoke instance: {}", error), + } + }); + + match result { + Ok(output) => { + let _ = sender.send(output); + }, + Err(error) => { + // If execution is panicked, the `join` in the original runtime code will + // panic as well, since the sender is dropped without sending anything. + tracing::error!(error = %error, "Call error in spawned task"); + }, + } + }), + ); + + new_handle + } + + fn join(&self, handle: u64) -> Vec { + let receiver = self.tasks.lock().remove(&handle).expect("No task for the handle"); + receiver.recv().expect("Spawned task panicked for the handle") + } +} + +impl RuntimeInstanceSpawn { + pub fn new( + module: Arc, + scheduler: Box, + ) -> Self { + Self { module, scheduler, counter: 0.into(), tasks: HashMap::new().into() } + } + + fn with_externalities_and_module( + module: Arc, + mut ext: &mut dyn Externalities, + ) -> Option { + ext.extension::() + .map(move |task_ext| Self::new(module, task_ext.clone())) + } +} + +/// Pre-registers the built-in extensions to the currently effective externalities. +/// +/// Meant to be called each time before calling into the runtime. 
+fn preregister_builtin_ext(module: Arc) { + sp_externalities::with_externalities(move |mut ext| { + if let Some(runtime_spawn) = + RuntimeInstanceSpawn::with_externalities_and_module(module, ext) + { + if let Err(e) = ext.register_extension(RuntimeSpawnExt(Box::new(runtime_spawn))) { + tracing::trace!( + target: "executor", + error = ?e, + "Failed to register `RuntimeSpawnExt` instance on externalities", + ) + } + } + }); +} + impl CodeExecutor for NativeElseWasmExecutor { type Error = Error; - fn call( + fn call< + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + UnwindSafe, + >( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, method: &str, data: &[u8], use_native: bool, - ) -> (Result>, bool) { + native_call: Option, + ) -> (Result>, bool) { tracing::trace!( target: "executor", function = %method, @@ -458,35 +603,57 @@ impl CodeExecutor for NativeElseWasmExecut let result = self.wasm.with_instance( runtime_code, ext, - |_, mut instance, onchain_version, mut ext| { + false, + |module, mut instance, onchain_version, mut ext| { let onchain_version = onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; let can_call_with = onchain_version.can_call_with(&self.native_version.runtime_version); - if use_native && can_call_with { - tracing::trace!( - target: "executor", - native = %self.native_version.runtime_version, - chain = %onchain_version, - "Request for native execution succeeded", - ); - - used_native = true; - Ok(with_externalities_safe(&mut **ext, move || D::dispatch(method, data))? 
- .ok_or_else(|| Error::MethodNotFound(method.to_owned()))) - } else { - if !can_call_with { + match (use_native, can_call_with, native_call) { + (_, false, _) | (false, _, _) => { + if !can_call_with { + tracing::trace!( + target: "executor", + native = %self.native_version.runtime_version, + chain = %onchain_version, + "Request for native execution failed", + ); + } + + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(method, data).map(NativeOrEncoded::Encoded) + }) + }, + (true, true, Some(call)) => { tracing::trace!( target: "executor", native = %self.native_version.runtime_version, chain = %onchain_version, - "Request for native execution failed", + "Request for native execution with native call succeeded" + ); + + used_native = true; + let res = with_externalities_safe(&mut **ext, move || (call)()) + .and_then(|r| r.map(NativeOrEncoded::Native).map_err(Error::ApiError)); + + Ok(res) + }, + _ => { + tracing::trace!( + target: "executor", + native = %self.native_version.runtime_version, + chain = %onchain_version, + "Request for native execution succeeded", ); - } - with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) + used_native = true; + Ok(with_externalities_safe(&mut **ext, move || D::dispatch(method, data))? + .map(NativeOrEncoded::Encoded) + .ok_or_else(|| Error::MethodNotFound(method.to_owned()))) + }, } }, ); diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 991802340db61..1dee739c50f9e 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -32,7 +32,6 @@ use sc_executor_common::{ use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode}; use sp_version::RuntimeVersion; use std::{ - num::NonZeroUsize, panic::AssertUnwindSafe, path::{Path, PathBuf}, sync::Arc, @@ -180,15 +179,17 @@ impl RuntimeCache { /// for caching. 
/// /// `runtime_cache_size` specifies the number of different runtimes versions preserved in an - /// in-memory cache, must always be at least 1. + /// in-memory cache. pub fn new( max_runtime_instances: usize, cache_path: Option, runtime_cache_size: u8, ) -> RuntimeCache { - let cap = - NonZeroUsize::new(runtime_cache_size.max(1) as usize).expect("cache size is not zero"); - RuntimeCache { runtimes: Mutex::new(LruCache::new(cap)), max_runtime_instances, cache_path } + RuntimeCache { + runtimes: Mutex::new(LruCache::new(runtime_cache_size.into())), + max_runtime_instances, + cache_path, + } } /// Prepares a WASM module instance and executes given function for it. diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 879af677ca042..46bacf54a42c6 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } log = "0.4.17" -wasmi = "0.13" +wasmi = "0.9.1" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sc-executor-common = { version = "0.10.0-dev", path = "../common" } sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime-interface" } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 1284cc23e4c96..e17707e158321 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -181,7 +181,6 @@ impl Sandbox for FunctionExecutor { let len = val_len as usize; - #[allow(deprecated)] let buffer = match self.memory.get(val_ptr.into(), len) { Err(_) => return Ok(sandbox_env::ERR_OUT_OF_BOUNDS), Ok(buffer) => buffer, @@ -569,7 +568,6 @@ fn call_in_wasm_module( match result { Ok(Some(I64(r))) => { let (ptr, length) = unpack_ptr_and_len(r as u64); - #[allow(deprecated)] memory.get(ptr, length as usize).map_err(|_| Error::Runtime) }, Err(e) => { diff --git 
a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index fc6d5db14aa1d..2dcfde378bb87 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -17,11 +17,8 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.0.0" } libc = "0.2.121" log = "0.4.17" -parity-wasm = "0.45" - -# When bumping wasmtime do not forget to also bump rustix -# to exactly the same version as used by wasmtime! -wasmtime = { version = "1.0.0", default-features = false, features = [ +parity-wasm = "0.42.0" +wasmtime = { version = "0.38.0", default-features = false, features = [ "cache", "cranelift", "jitdump", @@ -35,13 +32,8 @@ sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime- sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } sp-wasm-interface = { version = "6.0.0", features = ["wasmtime"], path = "../../../primitives/wasm-interface" } -# Here we include the rustix crate in the exactly same semver-compatible version as used by -# wasmtime and enable its 'use-libc' flag. -# -# By default rustix directly calls the appropriate syscalls completely bypassing libc; -# this doesn't have any actual benefits for us besides making it harder to debug memory -# problems (since then `mmap` etc. cannot be easily hooked into). 
-rustix = { version = "0.35.9", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } +[target.'cfg(target_os = "linux")'.dependencies] +rustix = { version = "0.35.6", default-features = false, features = ["std", "mm", "fs", "param"] } once_cell = "1.12.0" [dev-dependencies] diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index 768a6e36e2390..a54254810b68b 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -276,11 +276,12 @@ impl<'a> Sandbox for HostContext<'a> { .ok_or("Runtime doesn't have a table; sandbox is unavailable")?; let table_item = table.get(&mut self.caller, dispatch_thunk_id); - *table_item + table_item .ok_or("dispatch_thunk_id is out of bounds")? .funcref() .ok_or("dispatch_thunk_idx should be a funcref")? .ok_or("dispatch_thunk_idx should point to actual func")? + .clone() }; let guest_env = match sandbox::GuestEnvironment::decode(self.sandbox_store(), raw_env_def) { diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index feded4008068d..5d272accd3524 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -107,7 +107,8 @@ impl EntryPoint { ) -> std::result::Result { let entrypoint = func .typed::<(u32, u32), u64, _>(ctx) - .map_err(|_| "Invalid signature for direct entry point")?; + .map_err(|_| "Invalid signature for direct entry point")? + .clone(); Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) } @@ -118,7 +119,8 @@ impl EntryPoint { ) -> std::result::Result { let dispatcher = dispatcher .typed::<(u32, u32, u32), u64, _>(ctx) - .map_err(|_| "Invalid signature for wrapped entry point")?; + .map_err(|_| "Invalid signature for wrapped entry point")? 
+ .clone(); Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) } } @@ -212,8 +214,9 @@ impl InstanceWrapper { Error::from(format!("Exported method {} is not found", method)) })?; let func = extern_func(&export) - .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; - EntryPoint::direct(*func, &self.store).map_err(|_| { + .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? + .clone(); + EntryPoint::direct(func, &self.store).map_err(|_| { Error::from(format!("Exported function '{}' has invalid signature.", method)) })? }, @@ -228,9 +231,10 @@ impl InstanceWrapper { let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? - .ok_or(Error::FunctionRefIsNull(func_ref))?; + .ok_or(Error::FunctionRefIsNull(func_ref))? + .clone(); - EntryPoint::direct(*func, &self.store).map_err(|_| { + EntryPoint::direct(func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for direct call.", func_ref, @@ -248,9 +252,10 @@ impl InstanceWrapper { let dispatcher = val .funcref() .ok_or(Error::TableElementIsNotAFunction(dispatcher_ref))? - .ok_or(Error::FunctionRefIsNull(dispatcher_ref))?; + .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? + .clone(); - EntryPoint::wrapped(*dispatcher, func, &self.store).map_err(|_| { + EntryPoint::wrapped(dispatcher, func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for wrapped call.", dispatcher_ref, @@ -310,8 +315,9 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result std::result::Result native_stack_max, - - // In `wasmtime` 0.35 the default stack size limit was changed from 1MB to 512KB. - // - // This broke at least one parachain which depended on the original 1MB limit, - // so here we restore it to what it was originally. 
- None => 1024 * 1024, - }; - - config.max_wasm_stack(native_stack_max as usize); + config + .profiler(profiler) + .map_err(|e| WasmError::Instantiation(format!("fail to set profiler: {:#}", e)))?; + + if let Some(DeterministicStackLimit { native_stack_max, .. }) = + semantics.deterministic_stack_limit + { + config + .max_wasm_stack(native_stack_max as usize) + .map_err(|e| WasmError::Other(format!("cannot set max wasm stack: {:#}", e)))?; + } config.parallel_compilation(semantics.parallel_compilation); @@ -395,6 +393,9 @@ fn common_config(semantics: &Semantics) -> std::result::Result, } -#[derive(Clone)] pub struct Config { /// The WebAssembly standard requires all imports of an instantiated module to be resolved, /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index 9126cb336bde6..e0fd9fbce0c57 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -122,7 +122,7 @@ impl RuntimeBuilder { self } - fn build(&mut self) -> impl WasmModule + '_ { + fn build<'a>(&'a mut self) -> impl WasmModule + 'a { let blob = { let wasm: Vec; @@ -174,85 +174,6 @@ impl RuntimeBuilder { } } -fn deep_call_stack_wat(depth: usize) -> String { - format!( - r#" - (module - (memory $0 32) - (export "memory" (memory $0)) - (global (export "__heap_base") i32 (i32.const 0)) - (func (export "overflow") call 0) - - (func $overflow (param $0 i32) - (block $label$1 - (br_if $label$1 - (i32.ge_u - (local.get $0) - (i32.const {depth}) - ) - ) - (call $overflow - (i32.add - (local.get $0) - (i32.const 1) - ) - ) - ) - ) - - (func (export "main") - (param i32 i32) (result i64) - (call $overflow (i32.const 0)) - (i64.const 0) - ) - ) - "# - ) -} - -// These two tests ensure that the `wasmtime`'s stack size limit and the amount of -// stack space used by a single stack frame doesn't suddenly change without us noticing. 
-// -// If they do (e.g. because we've pulled in a new version of `wasmtime`) we want to know -// that it did, regardless of how small the change was. -// -// If these tests starting failing it doesn't necessarily mean that something is broken; -// what it means is that one (or multiple) of the following has to be done: -// a) the tests may need to be updated for the new call depth, -// b) the stack limit may need to be changed to maintain backwards compatibility, -// c) the root cause of the new call depth limit determined, and potentially fixed, -// d) the new call depth limit may need to be validated to ensure it doesn't prevent any -// existing chain from syncing (if it was effectively decreased) - -// We need two limits here since depending on whether the code is compiled in debug -// or in release mode the maximum call depth is slightly different. -const CALL_DEPTH_LOWER_LIMIT: usize = 65478; -const CALL_DEPTH_UPPER_LIMIT: usize = 65514; - -test_wasm_execution!(test_consume_under_1mb_of_stack_does_not_trap); -fn test_consume_under_1mb_of_stack_does_not_trap(instantiation_strategy: InstantiationStrategy) { - let wat = deep_call_stack_wat(CALL_DEPTH_LOWER_LIMIT); - let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); - let runtime = builder.build(); - let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); - instance.call_export("main", &[]).unwrap(); -} - -test_wasm_execution!(test_consume_over_1mb_of_stack_does_trap); -fn test_consume_over_1mb_of_stack_does_trap(instantiation_strategy: InstantiationStrategy) { - let wat = deep_call_stack_wat(CALL_DEPTH_UPPER_LIMIT + 1); - let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); - let runtime = builder.build(); - let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); - match instance.call_export("main", &[]).unwrap_err() { - Error::AbortedDueToTrap(error) => { - let expected = "wasm trap: call stack exhausted"; 
- assert_eq!(error.message, expected); - }, - error => panic!("unexpected error: {:?}", error), - } -} - test_wasm_execution!(test_nan_canonicalization); fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { let mut builder = RuntimeBuilder::new(instantiation_strategy).canonicalize_nans(true); diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 288e579d8da29..a5f20b9f3261d 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -15,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.7.6" -array-bytes = "4.1" -async-trait = "0.1.57" +async-trait = "0.1.50" dyn-clone = "1.0" finality-grandpa = { version = "0.16.0", features = ["derive-codec"] } futures = "0.3.21" futures-timer = "3.0.1" +hex = "0.4.2" log = "0.4.17" parity-scale-codec = { version = "3.0.0", features = ["derive"] } -parking_lot = "0.12.1" +parking_lot = "0.12.0" rand = "0.8.4" -serde_json = "1.0.85" +serde_json = "1.0.79" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } @@ -51,8 +51,12 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" -finality-grandpa = { version = "0.16.0", features = ["derive-codec", "test-helpers"] } +finality-grandpa = { version = "0.16.0", features = [ + "derive-codec", + "test-helpers", +] } serde = "1.0.136" +tempfile = "3.1.0" tokio = "1.17.0" sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-test = { version = "0.8.0", path = "../network/test" } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 235453ea35df1..25ed4a3f490e0 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -191,7 +191,7 @@ where 
"state is for completed round; completed rounds must have a prevote ghost; qed.", ); - let mut current_rounds = CurrentRounds::::new(); + let mut current_rounds = CurrentRounds::new(); current_rounds.insert(last_round_number + 1, HasVoted::No); let set_state = VoterSetState::Live { @@ -255,7 +255,7 @@ where let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - let mut current_rounds = CurrentRounds::::new(); + let mut current_rounds = CurrentRounds::new(); current_rounds.insert(last_round_number + 1, HasVoted::No); VoterSetState::Live { @@ -500,7 +500,7 @@ mod test { use super::*; use sp_core::{crypto::UncheckedFrom, H256}; use sp_finality_grandpa::AuthorityId; - use substrate_test_runtime_client::{self, runtime::Block}; + use substrate_test_runtime_client; fn dummy_id() -> AuthorityId { AuthorityId::unchecked_from([1; 32]) @@ -574,7 +574,7 @@ mod test { .unwrap(), ); - let mut current_rounds = CurrentRounds::::new(); + let mut current_rounds = CurrentRounds::new(); current_rounds.insert(round_number + 1, HasVoted::No); assert_eq!( @@ -667,7 +667,7 @@ mod test { .unwrap(), ); - let mut current_rounds = CurrentRounds::::new(); + let mut current_rounds = CurrentRounds::new(); current_rounds.insert(round_number + 1, HasVoted::No); assert_eq!( diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 218b4b668c10f..65d7dfb783aa3 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -35,8 +35,7 @@ //! impolite to send messages about r+1 or later. "future-round" messages can //! be dropped and ignored. //! -//! It is impolite to send a neighbor packet which moves backwards or does not progress -//! protocol state. +//! It is impolite to send a neighbor packet which moves backwards in protocol state. //! //! 
This is beneficial if it conveys some progress in the protocol state of the peer. //! @@ -90,15 +89,14 @@ use log::{debug, trace}; use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; -use sc_network::{PeerId, ReputationChange}; -use sc_network_common::protocol::role::ObservedRole; +use sc_network::{ObservedRole, PeerId, ReputationChange}; use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use super::{benefit, cost, Round, SetId, NEIGHBOR_REBROADCAST_PERIOD}; +use super::{benefit, cost, Round, SetId}; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; use std::{ @@ -149,15 +147,14 @@ enum Consider { /// A view of protocol state. #[derive(Debug)] struct View { - round: Round, // the current round we are at. - set_id: SetId, // the current voter set id. - last_commit: Option, // commit-finalized block height, if any. - last_update: Option, // last time we heard from peer, used for spamming detection. + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. + last_commit: Option, // commit-finalized block height, if any. } impl Default for View { fn default() -> Self { - View { round: Round(1), set_id: SetId(0), last_commit: None, last_update: None } + View { round: Round(1), set_id: SetId(0), last_commit: None } } } @@ -227,12 +224,7 @@ impl LocalView { /// Converts the local view to a `View` discarding round and set id /// information about the last commit. 
fn as_view(&self) -> View<&N> { - View { - round: self.round, - set_id: self.set_id, - last_commit: self.last_commit_height(), - last_update: None, - } + View { round: self.round, set_id: self.set_id, last_commit: self.last_commit_height() } } /// Update the set ID. implies a reset to round 1. @@ -357,7 +349,7 @@ pub(super) struct VoteMessage { /// The voter set ID this message is from. pub(super) set_id: SetId, /// The message itself. - pub(super) message: SignedMessage, + pub(super) message: SignedMessage, } /// Network level commit message with topic information. @@ -368,7 +360,7 @@ pub(super) struct FullCommitMessage { /// The voter set ID this message is from. pub(super) set_id: SetId, /// The compact commit message. - pub(super) message: CompactCommit, + pub(super) message: CompactCommit, } /// V1 neighbor packet. Neighbor packets are sent from nodes to their peers @@ -413,7 +405,7 @@ pub(super) struct FullCatchUpMessage { /// The voter set ID this message is from. pub(super) set_id: SetId, /// The compact commit message. - pub(super) message: CatchUp, + pub(super) message: CatchUp, } /// Misbehavior that peers can perform. @@ -424,8 +416,6 @@ pub(super) struct FullCatchUpMessage { pub(super) enum Misbehavior { // invalid neighbor message, considering the last one. InvalidViewChange, - // duplicate neighbor message. - DuplicateNeighborMessage, // could not decode neighbor message. bytes-length of the packet. UndecodablePacket(i32), // Bad catch up message (invalid signatures). @@ -447,7 +437,6 @@ impl Misbehavior { match *self { InvalidViewChange => cost::INVALID_VIEW_CHANGE, - DuplicateNeighborMessage => cost::DUPLICATE_NEIGHBOR_MESSAGE, UndecodablePacket(bytes) => ReputationChange::new( bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), "Grandpa: Bad packet", @@ -498,22 +487,20 @@ struct Peers { second_stage_peers: HashSet, /// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to. 
lucky_light_peers: HashSet, - /// Neighbor packet rebroadcast period --- we reduce the reputation of peers sending duplicate - /// packets too often. - neighbor_rebroadcast_period: Duration, } -impl Peers { - fn new(neighbor_rebroadcast_period: Duration) -> Self { +impl Default for Peers { + fn default() -> Self { Peers { inner: Default::default(), first_stage_peers: Default::default(), second_stage_peers: Default::default(), lucky_light_peers: Default::default(), - neighbor_rebroadcast_period, } } +} +impl Peers { fn new_peer(&mut self, who: PeerId, role: ObservedRole) { match role { ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { @@ -559,23 +546,10 @@ impl Peers { return Err(Misbehavior::InvalidViewChange) } - let now = Instant::now(); - let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) == - (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); - - if duplicate_packet { - if let Some(last_update) = peer.view.last_update { - if now < last_update + self.neighbor_rebroadcast_period / 2 { - return Err(Misbehavior::DuplicateNeighborMessage) - } - } - } - peer.view = View { round: update.round, set_id: update.set_id, last_commit: Some(update.commit_finalized_height), - last_update: Some(now), }; trace!(target: "afg", "Peer {} updated view. Now at {:?}, {:?}", @@ -773,7 +747,7 @@ impl Inner { Inner { local_view: None, - peers: Peers::new(NEIGHBOR_REBROADCAST_PERIOD), + peers: Peers::default(), live_topics: KeepTopics::new(), next_rebroadcast: Instant::now() + REBROADCAST_AFTER, authorities: Vec::new(), @@ -783,16 +757,13 @@ impl Inner { } } - /// Note a round in the current set has started. Does nothing if the last - /// call to the function was with the same `round`. + /// Note a round in the current set has started. 
fn note_round(&mut self, round: Round) -> MaybeMessage { { let local_view = match self.local_view { None => return None, Some(ref mut v) => if v.round == round { - // Do not send neighbor packets out if `round` has not changed --- - // such behavior is punishable. return None } else { v @@ -831,8 +802,6 @@ impl Inner { ); self.authorities = authorities; } - // Do not send neighbor packets out if the `set_id` has not changed --- - // such behavior is punishable. return None } else { v @@ -846,9 +815,7 @@ impl Inner { self.multicast_neighbor_packet() } - /// Note that we've imported a commit finalizing a given block. Does nothing if the last - /// call to the function was with the same or higher `finalized` number. - /// `set_id` & `round` are the ones the commit message is from. + /// Note that we've imported a commit finalizing a given block. fn note_commit_finalized( &mut self, round: Round, @@ -1103,7 +1070,7 @@ impl Inner { let (base_hash, base_number) = last_completed_round.base; - let catch_up = CatchUp:: { + let catch_up = CatchUp:: { round_number: last_completed_round.number, prevotes, precommits, @@ -1389,8 +1356,6 @@ impl GossipValidator { } /// Note that we've imported a commit finalizing a given block. - /// `set_id` & `round` are the ones the commit message is from and not necessarily - /// the latest set ID & round started. 
pub(super) fn note_commit_finalized( &self, round: Round, @@ -1681,13 +1646,12 @@ pub(super) struct PeerReport { #[cfg(test)] mod tests { - use super::{super::NEIGHBOR_REBROADCAST_PERIOD, environment::SharedVoterSetState, *}; + use super::{environment::SharedVoterSetState, *}; use crate::communication; use sc_network::config::Role; use sc_network_gossip::Validator as GossipValidatorT; + use sc_network_test::Block; use sp_core::{crypto::UncheckedFrom, H256}; - use std::time::Instant; - use substrate_test_runtime_client::runtime::{Block, Header}; // some random config (not really needed) fn config() -> crate::Config { @@ -1719,12 +1683,7 @@ mod tests { #[test] fn view_vote_rules() { - let view = View { - round: Round(100), - set_id: SetId(1), - last_commit: Some(1000u64), - last_update: None, - }; + let view = View { round: Round(100), set_id: SetId(1), last_commit: Some(1000u64) }; assert_eq!(view.consider_vote(Round(98), SetId(1)), Consider::RejectPast); assert_eq!(view.consider_vote(Round(1), SetId(0)), Consider::RejectPast); @@ -1741,12 +1700,7 @@ mod tests { #[test] fn view_global_message_rules() { - let view = View { - round: Round(100), - set_id: SetId(2), - last_commit: Some(1000u64), - last_update: None, - }; + let view = View { round: Round(100), set_id: SetId(2), last_commit: Some(1000u64) }; assert_eq!(view.consider_global(SetId(3), 1), Consider::RejectFuture); assert_eq!(view.consider_global(SetId(3), 1000), Consider::RejectFuture); @@ -1764,7 +1718,7 @@ mod tests { #[test] fn unknown_peer_cannot_be_updated() { - let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); + let mut peers = Peers::default(); let id = PeerId::random(); let update = @@ -1774,7 +1728,7 @@ mod tests { assert!(res.unwrap().is_none()); // connect & disconnect. 
- peers.new_peer(id, ObservedRole::Authority); + peers.new_peer(id.clone(), ObservedRole::Authority); peers.peer_disconnected(&id); let res = peers.update_peer_state(&id, update.clone()); @@ -1795,38 +1749,30 @@ mod tests { let update4 = NeighborPacket { round: Round(3), set_id: SetId(11), commit_finalized_height: 80 }; - // Use shorter rebroadcast period to safely roll the clock back in the last test - // and don't hit the system boot time on systems with unsigned time. - const SHORT_NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(1); - let mut peers = Peers::new(SHORT_NEIGHBOR_REBROADCAST_PERIOD); + let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id, ObservedRole::Authority); + peers.new_peer(id.clone(), ObservedRole::Authority); - let check_update = |peers: &mut Peers<_>, update: NeighborPacket<_>| { + let mut check_update = move |update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); assert_eq!(view.round, update.round); assert_eq!(view.set_id, update.set_id); assert_eq!(view.last_commit, Some(update.commit_finalized_height)); }; - check_update(&mut peers, update1); - check_update(&mut peers, update2); - check_update(&mut peers, update3); - check_update(&mut peers, update4.clone()); - - // Allow duplicate neighbor packets if enough time has passed. 
- peers.inner.get_mut(&id).unwrap().view.last_update = - Some(Instant::now() - SHORT_NEIGHBOR_REBROADCAST_PERIOD); - check_update(&mut peers, update4); + check_update(update1); + check_update(update2); + check_update(update3); + check_update(update4); } #[test] fn invalid_view_change() { - let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); + let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id, ObservedRole::Authority); + peers.new_peer(id.clone(), ObservedRole::Authority); peers .update_peer_state( @@ -1836,41 +1782,29 @@ mod tests { .unwrap() .unwrap(); - let mut check_update = move |update: NeighborPacket<_>, misbehavior| { + let mut check_update = move |update: NeighborPacket<_>| { let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); - assert_eq!(err, misbehavior); + assert_eq!(err, Misbehavior::InvalidViewChange); }; // round moves backwards. - check_update( - NeighborPacket { round: Round(9), set_id: SetId(10), commit_finalized_height: 10 }, - Misbehavior::InvalidViewChange, - ); - // set ID moves backwards. - check_update( - NeighborPacket { round: Round(10), set_id: SetId(9), commit_finalized_height: 10 }, - Misbehavior::InvalidViewChange, - ); + check_update(NeighborPacket { + round: Round(9), + set_id: SetId(10), + commit_finalized_height: 10, + }); // commit finalized height moves backwards. - check_update( - NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 9 }, - Misbehavior::InvalidViewChange, - ); - // duplicate packet without grace period. - check_update( - NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, - Misbehavior::DuplicateNeighborMessage, - ); - // commit finalized height moves backwards while round moves forward. - check_update( - NeighborPacket { round: Round(11), set_id: SetId(10), commit_finalized_height: 9 }, - Misbehavior::InvalidViewChange, - ); - // commit finalized height moves backwards while set ID moves forward. 
- check_update( - NeighborPacket { round: Round(10), set_id: SetId(11), commit_finalized_height: 9 }, - Misbehavior::InvalidViewChange, - ); + check_update(NeighborPacket { + round: Round(10), + set_id: SetId(10), + commit_finalized_height: 9, + }); + // set ID moves backwards. + check_update(NeighborPacket { + round: Round(10), + set_id: SetId(9), + commit_finalized_height: 10, + }); } #[test] @@ -1921,7 +1855,7 @@ mod tests { &VoteMessage { round: Round(1), set_id: SetId(set_id), - message: SignedMessage::

{ + message: SignedMessage:: { message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { target_hash: Default::default(), target_number: 10, @@ -1937,7 +1871,7 @@ mod tests { &VoteMessage { round: Round(1), set_id: SetId(set_id), - message: SignedMessage::
{ + message: SignedMessage:: { message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { target_hash: Default::default(), target_number: 10, @@ -2008,7 +1942,7 @@ mod tests { votes: Default::default(), }); - let mut current_rounds = environment::CurrentRounds::::new(); + let mut current_rounds = environment::CurrentRounds::new(); current_rounds.insert(3, environment::HasVoted::No); let set_state = @@ -2029,7 +1963,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let mut inner = val.inner.write(); - inner.peers.new_peer(peer, ObservedRole::Authority); + inner.peers.new_peer(peer.clone(), ObservedRole::Authority); let res = inner.handle_catch_up_request( &peer, @@ -2070,7 +2004,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); let send_request = |set_id, round| { let mut inner = val.inner.write(); @@ -2125,7 +2059,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); let import_neighbor_message = |set_id, round| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2195,7 +2129,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. 
let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); // importing a neighbor message from a peer in the same set in a later // round should lead to a catch up request but since they're disabled @@ -2223,8 +2157,11 @@ mod tests { let peer_authority = PeerId::random(); let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_authority, ObservedRole::Authority); - val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); + val.inner + .write() + .peers + .new_peer(peer_authority.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); let import_neighbor_message = |peer| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2273,7 +2210,7 @@ mod tests { // add the peer making the requests to the validator, otherwise it is // discarded. let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); + val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer_full, @@ -2333,9 +2270,12 @@ mod tests { full_nodes.resize_with(30, || PeerId::random()); for i in 0..30 { - val.inner.write().peers.new_peer(authorities[i], ObservedRole::Authority); + val.inner + .write() + .peers + .new_peer(authorities[i].clone(), ObservedRole::Authority); - val.inner.write().peers.new_peer(full_nodes[i], ObservedRole::Full); + val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); } let test = |rounds_elapsed, peers| { @@ -2414,7 +2354,7 @@ mod tests { // add a new light client as peer let light_peer = PeerId::random(); - val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); + val.inner.write().peers.new_peer(light_peer.clone(), ObservedRole::Light); assert!(!val.message_allowed()( &light_peer, @@ -2486,7 
+2426,7 @@ mod tests { // add a new peer at set id 1 let peer1 = PeerId::random(); - val.inner.write().peers.new_peer(peer1, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer1.clone(), ObservedRole::Authority); val.inner .write() @@ -2499,7 +2439,7 @@ mod tests { // peer2 will default to set id 0 let peer2 = PeerId::random(); - val.inner.write().peers.new_peer(peer2, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer2.clone(), ObservedRole::Authority); // create a commit for round 1 of set id 1 // targeting a block at height 2 diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 75a7697812c6c..378501cffdd62 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -37,7 +37,6 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::Duration, }; use finality_grandpa::{ @@ -46,7 +45,7 @@ use finality_grandpa::{ Message::{Precommit, Prevote, PrimaryPropose}, }; use parity_scale_codec::{Decode, Encode}; -use sc_network::ReputationChange; +use sc_network::{NetworkService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_keystore::SyncCryptoStorePtr; @@ -59,7 +58,6 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; -use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; @@ -69,12 +67,8 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; -// How often to rebroadcast neighbor packets, in cases where no new packets are created. 
-pub(crate) const NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(2 * 60); - pub mod grandpa_protocol_name { use sc_chain_spec::ChainSpec; - use sc_network_common::protocol::ProtocolName; pub(crate) const NAME: &str = "/grandpa/1"; /// Old names for the notifications protocol, used for backward compatibility. @@ -86,11 +80,10 @@ pub mod grandpa_protocol_name { pub fn standard_name>( genesis_hash: &Hash, chain_spec: &Box, - ) -> ProtocolName { - let genesis_hash = genesis_hash.as_ref(); + ) -> std::borrow::Cow<'static, str> { let chain_prefix = match chain_spec.fork_id() { - Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), - None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), + Some(fork_id) => format!("/{}/{}", hex::encode(genesis_hash), fork_id), + None => format!("/{}", hex::encode(genesis_hash)), }; format!("{}{}", chain_prefix, NAME).into() } @@ -107,8 +100,6 @@ mod cost { pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "Grandpa: Unknown voter"); pub(super) const INVALID_VIEW_CHANGE: Rep = Rep::new(-500, "Grandpa: Invalid view change"); - pub(super) const DUPLICATE_NEIGHBOR_MESSAGE: Rep = - Rep::new(-500, "Grandpa: Duplicate neighbor message without grace period"); pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; pub(super) const PER_BLOCK_LOADED: i32 = -10; @@ -165,26 +156,34 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// /// Something that provides both the capabilities needed for the `gossip_network::Network` trait as /// well as the ability to set a fork sync request for a particular block. -pub trait Network: - NetworkSyncForkRequest> - + NetworkBlock> - + GossipNetwork - + Clone - + Send - + 'static -{ +pub trait Network: GossipNetwork + Clone + Send + 'static { + /// Notifies the sync service to try and sync the given block from the given + /// peers. 
+ /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); } -impl Network for T +impl Network for Arc> where - Block: BlockT, - T: NetworkSyncForkRequest> - + NetworkBlock> - + GossipNetwork - + Clone - + Send - + 'static, + B: BlockT, + H: sc_network::ExHashT, { + fn set_sync_fork_request( + &self, + peers: Vec, + hash: B::Hash, + number: NumberFor, + ) { + NetworkService::set_sync_fork_request(self, peers, hash, number) + } } /// Create a unique topic for a round and set-id combo. @@ -285,7 +284,7 @@ impl> NetworkBridge { } let (neighbor_packet_worker, neighbor_packet_sender) = - periodic::NeighborPacketWorker::new(NEIGHBOR_REBROADCAST_PERIOD); + periodic::NeighborPacketWorker::new(); NetworkBridge { service, @@ -319,8 +318,8 @@ impl> NetworkBridge { round: Round, set_id: SetId, voters: Arc>, - has_voted: HasVoted, - ) -> (impl Stream> + Unpin, OutgoingMessages) { + has_voted: HasVoted, + ) -> (impl Stream> + Unpin, OutgoingMessages) { self.note_round(round, set_id, &voters); let keystore = keystore.and_then(|ks| { @@ -468,7 +467,7 @@ impl> NetworkBridge { hash: B::Hash, number: NumberFor, ) { - self.service.set_sync_fork_request(peers, hash, number) + Network::set_sync_fork_request(&self.service, peers, hash, number) } } @@ -682,15 +681,15 @@ pub(crate) struct OutgoingMessages { round: RoundNumber, set_id: SetIdNumber, keystore: Option, - sender: mpsc::Sender>, + sender: mpsc::Sender>, network: Arc>>, - has_voted: HasVoted, + has_voted: HasVoted, telemetry: Option, } impl Unpin for OutgoingMessages {} -impl Sink> for OutgoingMessages { +impl Sink> for OutgoingMessages { type Error = Error; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { @@ -701,10 +700,7 @@ impl Sink> for 
OutgoingMessages { }) } - fn start_send( - mut self: Pin<&mut Self>, - mut msg: Message, - ) -> Result<(), Self::Error> { + fn start_send(mut self: Pin<&mut Self>, mut msg: Message) -> Result<(), Self::Error> { // if we've voted on this round previously under the same key, send that vote instead match &mut msg { finality_grandpa::Message::PrimaryPropose(ref mut vote) => { @@ -794,7 +790,7 @@ impl Sink> for OutgoingMessages { // checks a compact commit. returns the cost associated with processing it if // the commit was bad. fn check_compact_commit( - msg: &CompactCommit, + msg: &CompactCommit, voters: &VoterSet, round: Round, set_id: SetId, @@ -862,7 +858,7 @@ fn check_compact_commit( // checks a catch up. returns the cost associated with processing it if // the catch up was bad. fn check_catch_up( - msg: &CatchUp, + msg: &CatchUp, voters: &VoterSet, set_id: SetId, telemetry: Option, @@ -912,7 +908,7 @@ fn check_catch_up( ) -> Result where B: BlockT, - I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, + I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, { use crate::communication::gossip::Misbehavior; @@ -1006,7 +1002,7 @@ impl CommitsOut { } } -impl Sink<(RoundNumber, Commit)> for CommitsOut { +impl Sink<(RoundNumber, Commit)> for CommitsOut { type Error = Error; fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { @@ -1015,7 +1011,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut, - input: (RoundNumber, Commit), + input: (RoundNumber, Commit), ) -> Result<(), Self::Error> { if !self.is_voter { return Ok(()) @@ -1037,7 +1033,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { + let compact_commit = CompactCommit:: { target_hash: commit.target_hash, target_number: commit.target_number, precommits, diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index c001796b5ca5d..e6d63beafc362 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ 
b/client/finality-grandpa/src/communication/periodic.rs @@ -32,6 +32,9 @@ use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; use sp_runtime::traits::{Block as BlockT, NumberFor}; +// How often to rebroadcast, in cases where no new packets are created. +const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); + /// A sender used to send neighbor packets to a background job. #[derive(Clone)] pub(super) struct NeighborPacketSender( @@ -57,7 +60,6 @@ impl NeighborPacketSender { /// implementation). Periodically it sends out the last packet in cases where no new ones arrive. pub(super) struct NeighborPacketWorker { last: Option<(Vec, NeighborPacket>)>, - rebroadcast_period: Duration, delay: Delay, rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, } @@ -65,16 +67,13 @@ pub(super) struct NeighborPacketWorker { impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { - pub(super) fn new(rebroadcast_period: Duration) -> (Self, NeighborPacketSender) { + pub(super) fn new() -> (Self, NeighborPacketSender) { let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( "mpsc_grandpa_neighbor_packet_worker", ); - let delay = Delay::new(rebroadcast_period); + let delay = Delay::new(REBROADCAST_AFTER); - ( - NeighborPacketWorker { last: None, rebroadcast_period, delay, rx }, - NeighborPacketSender(tx), - ) + (NeighborPacketWorker { last: None, delay, rx }, NeighborPacketSender(tx)) } } @@ -86,7 +85,7 @@ impl Stream for NeighborPacketWorker { match this.rx.poll_next_unpin(cx) { Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some((to, packet))) => { - this.delay.reset(this.rebroadcast_period); + this.delay.reset(REBROADCAST_AFTER); this.last = Some((to.clone(), packet.clone())); return Poll::Ready(Some((to, GossipMessage::::from(packet)))) @@ -99,7 +98,7 @@ impl Stream for NeighborPacketWorker { // Getting this far here implies that the timer fired. 
- this.delay.reset(this.rebroadcast_period); + this.delay.reset(REBROADCAST_AFTER); // Make sure the underlying task is scheduled for wake-up. // diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index eab7bb2df50cf..0ec5092a2a047 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -25,15 +25,7 @@ use super::{ use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; use futures::prelude::*; use parity_scale_codec::Encode; -use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; -use sc_network_common::{ - config::MultiaddrWithPeerId, - protocol::{event::Event as NetworkEvent, role::ObservedRole, ProtocolName}, - service::{ - NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, - NetworkSyncForkRequest, NotificationSender, NotificationSenderError, - }, -}; +use sc_network::{config::Role, Event as NetworkEvent, ObservedRole, PeerId}; use sc_network_gossip::Validator; use sc_network_test::{Block, Hash}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -41,7 +33,7 @@ use sp_finality_grandpa::AuthorityList; use sp_keyring::Ed25519Keyring; use sp_runtime::traits::NumberFor; use std::{ - collections::HashSet, + borrow::Cow, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -50,8 +42,8 @@ use std::{ #[derive(Debug)] pub(crate) enum Event { EventStream(TracingUnboundedSender), - WriteNotification(PeerId, Vec), - Report(PeerId, ReputationChange), + WriteNotification(sc_network::PeerId, Vec), + Report(sc_network::PeerId, sc_network::ReputationChange), Announce(Hash), } @@ -60,130 +52,57 @@ pub(crate) struct TestNetwork { sender: TracingUnboundedSender, } -impl NetworkPeers for TestNetwork { - fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!(); - } - - fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!(); 
- } - - fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { - unimplemented!(); +impl sc_network_gossip::Network for TestNetwork { + fn event_stream(&self) -> Pin + Send>> { + let (tx, rx) = tracing_unbounded("test"); + let _ = self.sender.unbounded_send(Event::EventStream(tx)); + Box::pin(rx) } - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) {} - - fn accept_unreserved_peers(&self) { - unimplemented!(); - } - - fn deny_unreserved_peers(&self) { - unimplemented!(); - } - - fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { - unimplemented!(); - } - - fn remove_reserved_peer(&self, _peer_id: PeerId) { - unimplemented!(); - } - - fn set_reserved_peers( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn add_peers_to_reserved_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn add_to_peers_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {} - fn sync_num_connected(&self) -> usize { - unimplemented!(); + fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } -} -impl NetworkEventStream for TestNetwork { - fn 
event_stream( - &self, - _name: &'static str, - ) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test"); - let _ = self.sender.unbounded_send(Event::EventStream(tx)); - Box::pin(rx) + fn announce(&self, block: Hash, _associated_data: Option>) { + let _ = self.sender.unbounded_send(Event::Announce(block)); } } -impl NetworkNotification for TestNetwork { - fn write_notification(&self, target: PeerId, _protocol: ProtocolName, message: Vec) { - let _ = self.sender.unbounded_send(Event::WriteNotification(target, message)); - } - - fn notification_sender( +impl super::Network for TestNetwork { + fn set_sync_fork_request( &self, - _target: PeerId, - _protocol: ProtocolName, - ) -> Result, NotificationSenderError> { - unimplemented!(); - } -} - -impl NetworkBlock> for TestNetwork { - fn announce_block(&self, hash: Hash, _data: Option>) { - let _ = self.sender.unbounded_send(Event::Announce(hash)); - } - - fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { - unimplemented!(); + _peers: Vec, + _hash: Hash, + _number: NumberFor, + ) { } } -impl NetworkSyncForkRequest> for TestNetwork { - fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} -} - impl sc_network_gossip::ValidatorContext for TestNetwork { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, who: &PeerId, data: Vec) { - ::write_notification( + fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { + >::write_notification( self, - *who, + who.clone(), grandpa_protocol_name::NAME.into(), data, ); } - fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { @@ -280,7 +199,7 @@ pub(crate) fn make_test_network() -> (impl Future, TestNetwork) } fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|&key| key.public().into()).map(|id| (id, 
1)).collect() + keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() } struct NoopContext; @@ -288,8 +207,8 @@ struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, _: &PeerId, _: Vec) {} - fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} + fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } #[test] @@ -305,7 +224,8 @@ fn good_commit_leads_to_relay() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = finality_grandpa::Precommit { target_hash, target_number }; + let precommit = + finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; let payload = sp_finality_grandpa::localized_payload( round, set_id, @@ -332,7 +252,7 @@ fn good_commit_leads_to_relay() { }) .encode(); - let id = PeerId::random(); + let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -361,19 +281,19 @@ fn good_commit_leads_to_relay() { // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to // send a message. 
- let sender_id = id; + let sender_id = id.clone(); let send_message = tester.filter_network_events(move |event| match event { Event::EventStream(sender) => { // Add the sending peer and send the commit let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id, + remote: sender_id.clone(), protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id, + remote: sender_id.clone(), messages: vec![( grandpa_protocol_name::NAME.into(), commit_to_send.clone().into(), @@ -381,9 +301,9 @@ fn good_commit_leads_to_relay() { }); // Add a random peer which will be the recipient of this message - let receiver_id = PeerId::random(); + let receiver_id = sc_network::PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: receiver_id, + remote: receiver_id.clone(), protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, @@ -455,7 +375,8 @@ fn bad_commit_leads_to_report() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = finality_grandpa::Precommit { target_hash, target_number }; + let precommit = + finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; let payload = sp_finality_grandpa::localized_payload( round, set_id, @@ -482,7 +403,7 @@ fn bad_commit_leads_to_report() { }) .encode(); - let id = PeerId::random(); + let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -511,17 +432,17 @@ fn bad_commit_leads_to_report() { // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to // send a message. 
- let sender_id = id; + let sender_id = id.clone(); let send_message = tester.filter_network_events(move |event| match event { Event::EventStream(sender) => { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id, + remote: sender_id.clone(), protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id, + remote: sender_id.clone(), messages: vec![( grandpa_protocol_name::NAME.into(), commit_to_send.clone().into(), @@ -563,7 +484,7 @@ fn bad_commit_leads_to_report() { #[test] fn peer_with_higher_view_leads_to_catch_up_request() { - let id = PeerId::random(); + let id = sc_network::PeerId::random(); let (tester, mut net) = make_test_network(); let test = tester @@ -643,7 +564,7 @@ fn grandpa_protocol_name() { // Create protocol name using random genesis hash. let genesis_hash = sp_core::H256::random(); - let expected = format!("/{}/grandpa/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); + let expected = format!("/{}/grandpa/1", hex::encode(genesis_hash)); let proto_name = grandpa_protocol_name::standard_name(&genesis_hash, &chain_spec); assert_eq!(proto_name.to_string(), expected); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f235c3a86c04e..63c8697053842 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -81,7 +81,7 @@ pub struct CompletedRound { /// The target block base used for voting in the round. pub base: (Block::Hash, NumberFor), /// All the votes observed in the round. - pub votes: Vec>, + pub votes: Vec>, } // Data about last completed rounds within a single voter set. Stores @@ -170,7 +170,7 @@ impl CompletedRounds { /// A map with voter status information for currently live rounds, /// which votes have we cast and what are they. 
-pub type CurrentRounds = BTreeMap::Header>>; +pub type CurrentRounds = BTreeMap>; /// The state of the current voter set, whether it is currently active or not /// and information related to the previously completed rounds. Current round @@ -214,7 +214,7 @@ impl VoterSetState { authority_set, ); - let mut current_rounds = CurrentRounds::::new(); + let mut current_rounds = CurrentRounds::new(); current_rounds.insert(1, HasVoted::No); VoterSetState::Live { completed_rounds, current_rounds } @@ -258,27 +258,27 @@ impl VoterSetState { /// Whether we've voted already during a prior run of the program. #[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub enum HasVoted { +pub enum HasVoted { /// Has not voted already in this round. No, /// Has voted in this round. - Yes(AuthorityId, Vote
), + Yes(AuthorityId, Vote), } /// The votes cast by this voter already during a prior run of the program. #[derive(Debug, Clone, Decode, Encode, PartialEq)] -pub enum Vote { +pub enum Vote { /// Has cast a proposal. - Propose(PrimaryPropose
), + Propose(PrimaryPropose), /// Has cast a prevote. - Prevote(Option>, Prevote
), + Prevote(Option>, Prevote), /// Has cast a precommit (implies prevote.) - Precommit(Option>, Prevote
, Precommit
), + Precommit(Option>, Prevote, Precommit), } -impl HasVoted
{ +impl HasVoted { /// Returns the proposal we should vote with (if any.) - pub fn propose(&self) -> Option<&PrimaryPropose
> { + pub fn propose(&self) -> Option<&PrimaryPropose> { match self { HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), HasVoted::Yes(_, Vote::Prevote(propose, _)) | @@ -288,7 +288,7 @@ impl HasVoted
{ } /// Returns the prevote we should vote with (if any.) - pub fn prevote(&self) -> Option<&Prevote
> { + pub fn prevote(&self) -> Option<&Prevote> { match self { HasVoted::Yes(_, Vote::Prevote(_, prevote)) | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), @@ -297,7 +297,7 @@ impl HasVoted
{ } /// Returns the precommit we should vote with (if any.) - pub fn precommit(&self) -> Option<&Precommit
> { + pub fn precommit(&self) -> Option<&Precommit> { match self { HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => Some(precommit), _ => None, @@ -368,7 +368,7 @@ impl SharedVoterSetState { } /// Return vote status information for the current round. - pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { + pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { match &*self.inner.read() { VoterSetState::Live { current_rounds, .. } => current_rounds .get(&round) @@ -771,7 +771,7 @@ where fn proposed( &self, round: RoundNumber, - propose: PrimaryPropose, + propose: PrimaryPropose, ) -> Result<(), Self::Error> { let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, @@ -811,17 +811,13 @@ where Ok(()) } - fn prevoted( - &self, - round: RoundNumber, - prevote: Prevote, - ) -> Result<(), Self::Error> { + fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; - let report_prevote_metrics = |prevote: &Prevote| { + let report_prevote_metrics = |prevote: &Prevote| { telemetry!( self.telemetry; CONSENSUS_DEBUG; @@ -877,14 +873,14 @@ where fn precommitted( &self, round: RoundNumber, - precommit: Precommit, + precommit: Precommit, ) -> Result<(), Self::Error> { let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; - let report_precommit_metrics = |precommit: &Precommit| { + let report_precommit_metrics = |precommit: &Precommit| { telemetry!( self.telemetry; CONSENSUS_DEBUG; @@ -1069,7 +1065,7 @@ where hash: Block::Hash, number: NumberFor, round: RoundNumber, - commit: Commit, + commit: Commit, ) -> Result<(), Self::Error> { finalize_block( self.client.clone(), @@ -1096,11 +1092,7 @@ where fn prevote_equivocation( &self, _round: RoundNumber, - equivocation: finality_grandpa::Equivocation< - Self::Id, - Prevote, - Self::Signature, - >, + equivocation: 
finality_grandpa::Equivocation, Self::Signature>, ) { warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); if let Err(err) = self.report_equivocation(equivocation.into()) { @@ -1111,11 +1103,7 @@ where fn precommit_equivocation( &self, _round: RoundNumber, - equivocation: finality_grandpa::Equivocation< - Self::Id, - Precommit, - Self::Signature, - >, + equivocation: finality_grandpa::Equivocation, Self::Signature>, ) { warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); if let Err(err) = self.report_equivocation(equivocation.into()) { @@ -1126,11 +1114,11 @@ where pub(crate) enum JustificationOrCommit { Justification(GrandpaJustification), - Commit((RoundNumber, Commit)), + Commit((RoundNumber, Commit)), } -impl From<(RoundNumber, Commit)> for JustificationOrCommit { - fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { +impl From<(RoundNumber, Commit)> for JustificationOrCommit { + fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { JustificationOrCommit::Commit(commit) } } @@ -1352,7 +1340,7 @@ where // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. 
client - .apply_finality(import_op, hash, persisted_justification, true) + .apply_finality(import_op, BlockId::Hash(hash), persisted_justification, true) .map_err(|e| { warn!(target: "afg", "Error applying finality to block {:?}: {}", (hash, number), e); e diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 453b41bc63468..ac243a1633ee1 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -183,9 +183,7 @@ where } }, AuthoritySetChangeId::Set(_, last_block_for_set) => { - let last_block_for_set_id = backend - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(last_block_for_set))?; + let last_block_for_set_id = BlockId::Number(last_block_for_set); let justification = if let Some(grandpa_justification) = backend .blockchain() .justifications(last_block_for_set_id)? @@ -311,8 +309,7 @@ mod tests { } for block in to_finalize { - let hash = blocks[*block as usize - 1].hash(); - client.finalize_block(hash, None).unwrap(); + client.finalize_block(BlockId::Number(*block), None).unwrap(); } (client, backend, blocks) } @@ -383,13 +380,8 @@ mod tests { precommits: Vec::new(), }; - let grandpa_just: GrandpaJustification = - sp_finality_grandpa::GrandpaJustification::
{ - round: 8, - votes_ancestries: Vec::new(), - commit, - } - .into(); + let grandpa_just = + GrandpaJustification:: { round: 8, votes_ancestries: Vec::new(), commit }; let finality_proof = FinalityProof { block: header(2).hash(), @@ -492,7 +484,7 @@ mod tests { let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); client - .finalize_block(block8.hash(), Some((ID, grandpa_just8.encode().clone()))) + .finalize_block(BlockId::Number(8), Some((ID, grandpa_just8.encode().clone()))) .unwrap(); // Authority set change at block 8, so the justification stored there will be used in the diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 3715287eea31f..b5a0d7be70f19 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -424,15 +424,13 @@ where } /// Read current set id form a given state. - fn current_set_id(&self, hash: Block::Hash) -> Result { - let id = &BlockId::hash(hash); + fn current_set_id(&self, id: &BlockId) -> Result { let runtime_version = self.inner.runtime_api().version(id).map_err(|e| { ConsensusError::ClientImport(format!( "Unable to retrieve current runtime version. {}", e )) })?; - if runtime_version .api_version(&>::ID) .map_or(false, |v| v < 3) @@ -441,8 +439,7 @@ where // This code may be removed once warp sync to an old runtime is no longer needed. for prefix in ["GrandpaFinality", "Grandpa"] { let k = [twox_128(prefix.as_bytes()), twox_128(b"CurrentSetId")].concat(); - if let Ok(Some(id)) = - self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) + if let Ok(Some(id)) = self.inner.storage(id, &sc_client_api::StorageKey(k.to_vec())) { if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { return Ok(id) @@ -475,12 +472,13 @@ where // finality proofs and that the state is correct and final. // So we can read the authority list and set id from the state. 
self.authority_set_hard_forks.clear(); + let block_id = BlockId::hash(hash); let authorities = self .inner .runtime_api() - .grandpa_authorities(&BlockId::hash(hash)) + .grandpa_authorities(&block_id) .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let set_id = self.current_set_id(hash)?; + let set_id = self.current_set_id(&block_id)?; let authority_set = AuthoritySet::new( authorities.clone(), set_id, diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 56b26c964ce9b..44abb4b95beba 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -18,7 +18,6 @@ use std::{ collections::{HashMap, HashSet}, - marker::PhantomData, sync::Arc, }; @@ -43,25 +42,9 @@ use crate::{AuthorityList, Commit, Error}; /// nodes, and are used by syncing nodes to prove authority set handoffs. #[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)] pub struct GrandpaJustification { - /// The GRANDPA justification for block finality. 
- pub justification: sp_finality_grandpa::GrandpaJustification, - _block: PhantomData, -} - -impl From> - for GrandpaJustification -{ - fn from(justification: sp_finality_grandpa::GrandpaJustification) -> Self { - Self { justification, _block: Default::default() } - } -} - -impl Into> - for GrandpaJustification -{ - fn into(self) -> sp_finality_grandpa::GrandpaJustification { - self.justification - } + pub(crate) round: u64, + pub(crate) commit: Commit, + pub(crate) votes_ancestries: Vec, } impl GrandpaJustification { @@ -70,8 +53,8 @@ impl GrandpaJustification { pub fn from_commit( client: &Arc, round: u64, - commit: Commit, - ) -> Result + commit: Commit, + ) -> Result, Error> where C: HeaderBackend, { @@ -91,7 +74,7 @@ impl GrandpaJustification { .iter() .map(|signed| &signed.precommit) .min_by_key(|precommit| precommit.target_number) - .map(|precommit| (precommit.target_hash, precommit.target_number)) + .map(|precommit| (precommit.target_hash.clone(), precommit.target_number)) { None => return error(), Some(base) => base, @@ -125,7 +108,7 @@ impl GrandpaJustification { } } - Ok(sp_finality_grandpa::GrandpaJustification { round, commit, votes_ancestries }.into()) + Ok(GrandpaJustification { round, commit, votes_ancestries }) } /// Decode a GRANDPA justification and validate the commit and the votes' @@ -135,17 +118,15 @@ impl GrandpaJustification { finalized_target: (Block::Hash, NumberFor), set_id: u64, voters: &VoterSet, - ) -> Result + ) -> Result, ClientError> where NumberFor: finality_grandpa::BlockNumberOps, { let justification = GrandpaJustification::::decode(&mut &*encoded) .map_err(|_| ClientError::JustificationDecode)?; - if ( - justification.justification.commit.target_hash, - justification.justification.commit.target_number, - ) != finalized_target + if (justification.commit.target_hash, justification.commit.target_number) != + finalized_target { let msg = "invalid commit target in grandpa justification".to_string(); 
Err(ClientError::BadJustification(msg)) @@ -176,10 +157,9 @@ impl GrandpaJustification { { use finality_grandpa::Chain; - let ancestry_chain = AncestryChain::::new(&self.justification.votes_ancestries); + let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); - match finality_grandpa::validate_commit(&self.justification.commit, voters, &ancestry_chain) - { + match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { Ok(ref result) if result.is_valid() => {}, _ => { let msg = "invalid commit in grandpa justification".to_string(); @@ -191,13 +171,12 @@ impl GrandpaJustification { // should serve as the root block for populating ancestry (i.e. // collect all headers from all precommit blocks to the base) let base_hash = self - .justification .commit .precommits .iter() .map(|signed| &signed.precommit) .min_by_key(|precommit| precommit.target_number) - .map(|precommit| precommit.target_hash) + .map(|precommit| precommit.target_hash.clone()) .expect( "can only fail if precommits is empty; \ commit has been validated above; \ @@ -207,12 +186,12 @@ impl GrandpaJustification { let mut buf = Vec::new(); let mut visited_hashes = HashSet::new(); - for signed in self.justification.commit.precommits.iter() { + for signed in self.commit.precommits.iter() { if !sp_finality_grandpa::check_message_signature_with_buffer( &finality_grandpa::Message::Precommit(signed.precommit.clone()), &signed.id, &signed.signature, - self.justification.round, + self.round, set_id, &mut buf, ) { @@ -241,12 +220,8 @@ impl GrandpaJustification { } } - let ancestry_hashes: HashSet<_> = self - .justification - .votes_ancestries - .iter() - .map(|h: &Block::Header| h.hash()) - .collect(); + let ancestry_hashes: HashSet<_> = + self.votes_ancestries.iter().map(|h: &Block::Header| h.hash()).collect(); if visited_hashes != ancestry_hashes { return Err(ClientError::BadJustification( @@ -260,7 +235,7 @@ impl GrandpaJustification { /// The target block number and hash that this 
justifications proves finality for. pub fn target(&self) -> (NumberFor, Block::Hash) { - (self.justification.commit.target_number, self.justification.commit.target_hash) + (self.commit.target_number, self.commit.target_hash) } } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index a7326d57c2bf0..cb32957c0b0bf 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -68,7 +68,6 @@ use sc_client_api::{ StorageProvider, TransactionFor, }; use sc_consensus::BlockImport; -use sc_network_common::protocol::ProtocolName; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; @@ -144,35 +143,72 @@ use sp_finality_grandpa::{AuthorityList, AuthoritySignature, SetId}; use until_imported::UntilGlobalMessageBlocksImported; // Re-export these two because it's just so damn convenient. -pub use sp_finality_grandpa::{ - AuthorityId, AuthorityPair, CatchUp, Commit, CompactCommit, GrandpaApi, Message, Precommit, - Prevote, PrimaryPropose, ScheduledChange, SignedMessage, -}; +pub use sp_finality_grandpa::{AuthorityId, AuthorityPair, GrandpaApi, ScheduledChange}; use std::marker::PhantomData; #[cfg(test)] mod tests; +/// A GRANDPA message for a substrate chain. +pub type Message = finality_grandpa::Message<::Hash, NumberFor>; + +/// A signed message. +pub type SignedMessage = finality_grandpa::SignedMessage< + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, +>; + +/// A primary propose message for this chain's block type. +pub type PrimaryPropose = + finality_grandpa::PrimaryPropose<::Hash, NumberFor>; +/// A prevote message for this chain's block type. +pub type Prevote = finality_grandpa::Prevote<::Hash, NumberFor>; +/// A precommit message for this chain's block type. 
+pub type Precommit = finality_grandpa::Precommit<::Hash, NumberFor>; +/// A catch up message for this chain's block type. +pub type CatchUp = finality_grandpa::CatchUp< + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, +>; +/// A commit message for this chain's block type. +pub type Commit = finality_grandpa::Commit< + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, +>; +/// A compact commit message for this chain's block type. +pub type CompactCommit = finality_grandpa::CompactCommit< + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, +>; /// A global communication input stream for commits and catch up messages. Not /// exposed publicly, used internally to simplify types in the communication /// layer. -type CommunicationIn = voter::CommunicationIn< +type CommunicationIn = finality_grandpa::voter::CommunicationIn< ::Hash, NumberFor, AuthoritySignature, AuthorityId, >; + /// Global communication input stream for commits and catch up messages, with /// the hash type not being derived from the block, useful for forcing the hash /// to some type (e.g. `H256`) when the compiler can't do the inference. type CommunicationInH = - voter::CommunicationIn, AuthoritySignature, AuthorityId>; + finality_grandpa::voter::CommunicationIn, AuthoritySignature, AuthorityId>; /// Global communication sink for commits with the hash type not being derived /// from the block, useful for forcing the hash to some type (e.g. `H256`) when /// the compiler can't do the inference. type CommunicationOutH = - voter::CommunicationOut, AuthoritySignature, AuthorityId>; + finality_grandpa::voter::CommunicationOut, AuthoritySignature, AuthorityId>; /// Shared voter state for querying. pub struct SharedVoterState { @@ -196,7 +232,7 @@ impl SharedVoterState { } /// Get the inner `VoterState` instance. 
- pub fn voter_state(&self) -> Option> { + pub fn voter_state(&self) -> Option> { self.inner.read().as_ref().map(|vs| vs.get()) } } @@ -230,7 +266,7 @@ pub struct Config { /// TelemetryHandle instance. pub telemetry: Option, /// Chain specific GRANDPA protocol name. See [`crate::protocol_standard_name`]. - pub protocol_name: ProtocolName, + pub protocol_name: std::borrow::Cow<'static, str>, } impl Config { @@ -687,20 +723,19 @@ pub struct GrandpaParams { /// [`sc_network::config::NetworkConfiguration::extra_sets`]. /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( - protocol_name: ProtocolName, -) -> sc_network_common::config::NonDefaultSetConfig { + protocol_name: std::borrow::Cow<'static, str>, +) -> sc_network::config::NonDefaultSetConfig { use communication::grandpa_protocol_name; - sc_network_common::config::NonDefaultSetConfig { + sc_network::config::NonDefaultSetConfig { notifications_protocol: protocol_name, fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. 
max_notification_size: 1024 * 1024, - handshake: None, - set_config: sc_network_common::config::SetConfig { + set_config: sc_network::config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: sc_network_common::config::NonReservedPeerMode::Deny, + non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, }, } } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 93d20110ff5af..623ac577c5579 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -41,7 +41,7 @@ use sp_finality_grandpa::{ AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, GRANDPA_ENGINE_ID, }; use sp_keyring::Ed25519Keyring; -use sp_keystore::{testing::KeyStore as TestKeyStore, SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ codec::Encode, generic::{BlockId, DigestItem}, @@ -59,6 +59,7 @@ use authorities::AuthoritySet; use communication::grandpa_protocol_name; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_consensus::LongestChain; +use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::GRANDPA; type TestLinkHalf = @@ -169,7 +170,7 @@ pub(crate) struct RuntimeApi { impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; - fn runtime_api(&self) -> ApiRef<'_, Self::Api> { + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { RuntimeApi { inner: self.clone() }.into() } } @@ -209,14 +210,17 @@ impl GenesisAuthoritySetProvider for TestApi { const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() + keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() } -fn create_keystore(authority: Ed25519Keyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeyStore::new()); +fn 
create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile::TempDir) { + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); SyncCryptoStore::ed25519_generate_new(&*keystore, GRANDPA, Some(&authority.to_seed())) .expect("Creates authority key"); - keystore + + (keystore, keystore_path) } fn block_until_complete( @@ -239,7 +243,7 @@ fn initialize_grandpa( let voters = stream::FuturesUnordered::new(); for (peer_id, key) in peers.iter().enumerate() { - let keystore = create_keystore(*key); + let (keystore, _) = create_keystore(*key); let (net_service, link) = { // temporary needed for some reason @@ -363,11 +367,9 @@ fn finalize_3_voters_no_observers() { runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(20, false); net.block_until_sync(); - let hashof20 = net.peer(0).client().info().best_hash; for i in 0..3 { assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); - assert_eq!(net.peer(i).client().info().best_hash, hashof20, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -375,7 +377,12 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock().peer(0).client().justifications(hashof20).unwrap().is_none(), + net.lock() + .peer(0) + .client() + .justifications(&BlockId::Number(20)) + .unwrap() + .is_none(), "Extra justification for block#1", ); } @@ -443,7 +450,7 @@ fn finalize_3_voters_1_full_observer() { let justification = crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); - assert_eq!(justification.justification.commit.target_number, 20); + assert_eq!(justification.commit.target_number, 20); } } @@ -473,9 +480,11 @@ fn transition_3_voters_twice_1_full_observer() { let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); let mut 
voters = Vec::new(); for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { - let keystore = create_keystore(local_key); + let (keystore, keystore_path) = create_keystore(local_key); + keystore_paths.push(keystore_path); let (net_service, link) = { let net = net.lock(); @@ -524,7 +533,7 @@ fn transition_3_voters_twice_1_full_observer() { { let net = net.clone(); let client = net.lock().peers[0].client().clone(); - let peers_c = *peers_c; + let peers_c = peers_c.clone(); // wait for blocks to be finalized before generating new ones let block_production = client @@ -613,15 +622,19 @@ fn justification_is_generated_periodically() { net.peer(0).push_blocks(32, false); net.block_until_sync(); - let hashof32 = net.peer(0).client().info().best_hash; - let net = Arc::new(Mutex::new(net)); run_to_completion(&mut runtime, 32, net.clone(), peers); // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(hashof32).unwrap().is_some()); + assert!(net + .lock() + .peer(i) + .client() + .justifications(&BlockId::Number(32)) + .unwrap() + .is_some()); } } @@ -641,7 +654,7 @@ fn sync_justifications_on_change_blocks() { net.peer(0).push_blocks(20, false); // at block 21 we do add a transition which is instant - let hashof21 = net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { + net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; add_scheduled_change( &mut block, @@ -665,12 +678,25 @@ fn sync_justifications_on_change_blocks() { // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(hashof21).unwrap().is_some()); + assert!(net + .lock() + .peer(i) + .client() + .justifications(&BlockId::Number(21)) + .unwrap() + .is_some()); } // the last peer should get the 
justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justifications(hashof21).unwrap().is_none() { + if net + .lock() + .peer(3) + .client() + .justifications(&BlockId::Number(21)) + .unwrap() + .is_none() + { net.lock().poll(cx); Poll::Pending } else { @@ -908,6 +934,7 @@ fn voter_persists_its_votes() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -920,7 +947,11 @@ fn voter_persists_its_votes() { // create the communication layer for bob, but don't start any // voter. instead we'll listen for the prevote that alice casts // and cast our own manually - let bob_keystore = create_keystore(peers[1]); + let bob_keystore = { + let (keystore, keystore_path) = create_keystore(peers[1]); + keystore_paths.push(keystore_path); + keystore + }; let bob_network = { let config = Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -953,7 +984,7 @@ fn voter_persists_its_votes() { // spawn two voters for alice. // half-way through the test, we stop one and start the other. 
let (alice_voter1, abort) = future::abortable({ - let keystore = create_keystore(peers[0]); + let (keystore, _) = create_keystore(peers[0]); let (net_service, link) = { // temporary needed for some reason @@ -987,7 +1018,7 @@ fn voter_persists_its_votes() { peers: &[Ed25519Keyring], net: Arc>, ) -> impl Future + Send { - let keystore = create_keystore(peers[0]); + let (keystore, _) = create_keystore(peers[0]); let mut net = net.lock(); // we add a new peer to the test network and we'll use @@ -1235,6 +1266,8 @@ fn voter_catches_up_to_latest_round_when_behind() { Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) }; + let mut keystore_paths = Vec::new(); + // spawn authorities for (peer_id, key) in peers.iter().enumerate() { let (client, link) = { @@ -1251,7 +1284,8 @@ fn voter_catches_up_to_latest_round_when_behind() { .for_each(move |_| future::ready(())), ); - let keystore = create_keystore(*key); + let (keystore, keystore_path) = create_keystore(*key); + keystore_paths.push(keystore_path); let voter = voter(Some(keystore), peer_id, link, net.clone()); @@ -1423,12 +1457,7 @@ fn grandpa_environment_respects_voting_rules() { ); // we finalize block 19 with block 21 being the best block - let hashof19 = peer - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::Number(19)) - .unwrap(); - peer.client().finalize_block(hashof19, None, false).unwrap(); + peer.client().finalize_block(BlockId::Number(19), None, false).unwrap(); // the 3/4 environment should propose block 21 for voting assert_eq!( @@ -1450,12 +1479,7 @@ fn grandpa_environment_respects_voting_rules() { ); // we finalize block 21 with block 21 being the best block - let hashof21 = peer - .client() - .as_client() - .expect_block_hash_from_id(&BlockId::Number(21)) - .unwrap(); - peer.client().finalize_block(hashof21, None, false).unwrap(); + peer.client().finalize_block(BlockId::Number(21), None, false).unwrap(); // even though the default environment will 
always try to not vote on the // best block, there's a hard rule that we can't cast any votes lower than @@ -1481,7 +1505,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); - let keystore = create_keystore(peers[0]); + let (keystore, _keystore_path) = create_keystore(peers[0]); let environment = test_environment(&link, Some(keystore), network_service.clone(), ()); let round_state = || finality_grandpa::round::State::genesis(Default::default()); @@ -1666,7 +1690,7 @@ fn imports_justification_for_regular_blocks_on_import() { ); // the justification should be imported and available from the client - assert!(client.justifications(block_hash).unwrap().is_some()); + assert!(client.justifications(&BlockId::Hash(block_hash)).unwrap().is_some()); } #[test] @@ -1681,7 +1705,7 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); - let keystore = create_keystore(alice); + let (keystore, _keystore_path) = create_keystore(alice); test_environment(&link, Some(keystore), network_service.clone(), ()) }; diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index df0b63348e94b..6adce0d920209 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -354,7 +354,7 @@ fn warn_authority_wrong_target(hash: H, id: AuthorityId) ); } -impl BlockUntilImported for SignedMessage { +impl BlockUntilImported for SignedMessage { type Blocked = Self; fn needs_waiting>( @@ -389,13 +389,8 @@ impl BlockUntilImported for SignedMessage { /// Helper type definition for the stream which waits until vote targets for /// signed messages are imported. 
-pub(crate) type UntilVoteTargetImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - SignedMessage<::Header>, ->; +pub(crate) type UntilVoteTargetImported = + UntilImported>; /// This blocks a global message import, i.e. a commit or catch up messages, /// until all blocks referenced in its votes are known. @@ -592,7 +587,7 @@ mod tests { fn import_header(&self, header: Header) { let hash = header.hash(); - let number = *header.number(); + let number = header.number().clone(); self.known_blocks.lock().insert(hash, number); self.sender @@ -613,7 +608,7 @@ mod tests { impl BlockStatusT for TestBlockStatus { fn block_number(&self, hash: Hash) -> Result, Error> { - Ok(self.inner.lock().get(&hash).map(|x| *x)) + Ok(self.inner.lock().get(&hash).map(|x| x.clone())) } } @@ -651,7 +646,7 @@ mod tests { // unwrap the commit from `CommunicationIn` returning its fields in a tuple, // panics if the given message isn't a commit - fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit
) { + fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit) { match msg { voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), _ => panic!("expected commit"), @@ -660,7 +655,7 @@ mod tests { // unwrap the catch up from `CommunicationIn` returning its inner representation, // panics if the given message isn't a catch up - fn unapply_catch_up(msg: CommunicationIn) -> CatchUp
{ + fn unapply_catch_up(msg: CommunicationIn) -> CatchUp { match msg { voter::CommunicationIn::CatchUp(catch_up, ..) => catch_up, _ => panic!("expected catch up"), @@ -745,7 +740,7 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let unknown_commit = CompactCommit::
{ + let unknown_commit = CompactCommit:: { target_hash: h1.hash(), target_number: 5, precommits: vec![ @@ -773,7 +768,7 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let known_commit = CompactCommit::
{ + let known_commit = CompactCommit:: { target_hash: h1.hash(), target_number: 5, precommits: vec![ @@ -915,7 +910,7 @@ mod tests { // we create a commit message, with precommits for blocks 6 and 7 which // we haven't imported. - let unknown_commit = CompactCommit::
{ + let unknown_commit = CompactCommit:: { target_hash: h1.hash(), target_number: 5, precommits: vec![ diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index fb7754fc0169a..051c7f2c03658 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -419,7 +419,7 @@ mod tests { } let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); - let best_number = *best.number(); + let best_number = best.number().clone(); for i in 0u32..5 { let base = client.header(&BlockId::Number(i.into())).unwrap().unwrap(); diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index c9f762fc7d593..a31a0a8b91908 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -130,7 +130,7 @@ impl WarpSyncProof { } let justification = blockchain - .justifications(header.hash())? + .justifications(BlockId::Number(*last_block))? 
.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) .expect( "header is last in set and contains standard change signal; \ @@ -327,7 +327,7 @@ mod tests { use sp_consensus::BlockOrigin; use sp_finality_grandpa::GRANDPA_ENGINE_ID; use sp_keyring::Ed25519Keyring; - use sp_runtime::traits::Header as _; + use sp_runtime::{generic::BlockId, traits::Header as _}; use std::sync::Arc; use substrate_test_runtime_client::{ ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClientBuilder, @@ -412,7 +412,10 @@ mod tests { let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); client - .finalize_block(target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) + .finalize_block( + BlockId::Hash(target_hash), + Some((GRANDPA_ENGINE_ID, justification.encode())), + ) .unwrap(); authority_set_changes.push((current_set_id, n)); diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 073199d005fd1..528365d62c18b 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -17,9 +17,9 @@ ansi_term = "0.12.1" futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-network = { version = "0.10.0-dev", path = "../network" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 3d585a9985134..446ddf47b4cab 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -20,13 
+20,7 @@ use crate::OutputFormat; use ansi_term::Colour; use log::info; use sc_client_api::ClientInfo; -use sc_network_common::{ - service::NetworkStatus, - sync::{ - warp::{WarpSyncPhase, WarpSyncProgress}, - SyncState, - }, -}; +use sc_network::{NetworkStatus, SyncState, WarpSyncPhase, WarpSyncProgress}; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; use std::{fmt, time::Instant}; @@ -93,37 +87,42 @@ impl InformantDisplay { (diff_bytes_inbound, diff_bytes_outbound) }; - let (level, status, target) = - match (net_status.sync_state, net_status.state_sync, net_status.warp_sync) { - ( - _, - _, - Some(WarpSyncProgress { phase: WarpSyncPhase::DownloadingBlocks(n), .. }), - ) => ("⏩", "Block history".into(), format!(", #{}", n)), - (_, _, Some(warp)) => ( - "⏩", - "Warping".into(), - format!( - ", {}, {:.2} Mib", - warp.phase, - (warp.total_bytes as f32) / (1024f32 * 1024f32) - ), + let (level, status, target) = match ( + net_status.sync_state, + net_status.best_seen_block, + net_status.state_sync, + net_status.warp_sync, + ) { + ( + _, + _, + _, + Some(WarpSyncProgress { phase: WarpSyncPhase::DownloadingBlocks(n), .. 
}), + ) => ("⏩", "Block history".into(), format!(", #{}", n)), + (_, _, _, Some(warp)) => ( + "⏩", + "Warping".into(), + format!( + ", {}, {:.2} Mib", + warp.phase, + (warp.total_bytes as f32) / (1024f32 * 1024f32) ), - (_, Some(state), _) => ( - "⚙️ ", - "Downloading state".into(), - format!( - ", {}%, {:.2} Mib", - state.percentage, - (state.size as f32) / (1024f32 * 1024f32) - ), + ), + (_, _, Some(state), _) => ( + "⚙️ ", + "Downloading state".into(), + format!( + ", {}%, {:.2} Mib", + state.percentage, + (state.size as f32) / (1024f32 * 1024f32) ), - (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading { target }, _, _) => - ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")), - (SyncState::Importing { target }, _, _) => - ("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")), - }; + ), + (SyncState::Idle, _, _, _) => ("💤", "Idle".into(), "".into()), + (SyncState::Downloading, None, _, _) => + ("⚙️ ", format!("Preparing{}", speed), "".into()), + (SyncState::Downloading, Some(n), None, _) => + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n)), + }; if self.format.enable_color { info!( diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 52f1c95fe0198..5dca77f1a7433 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -24,7 +24,7 @@ use futures_timer::Delay; use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network_common::service::NetworkStatusProvider; +use sc_network::NetworkService; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; @@ -53,13 +53,12 @@ impl Default for OutputFormat { } /// Builds the informant and returns a `Future` that drives the informant. -pub async fn build( +pub async fn build( client: Arc, - network: N, + network: Arc::Hash>>, pool: Arc

, format: OutputFormat, ) where - N: NetworkStatusProvider, C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, P: TransactionPool + MallocSizeOf, diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index ff963f9d446f6..be4adba2a52fe 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -14,10 +14,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" -async-trait = "0.1.57" -parking_lot = "0.12.1" -serde_json = "1.0.85" +async-trait = "0.1.50" +hex = "0.4.0" +parking_lot = "0.12.0" +serde_json = "1.0.79" thiserror = "1.0" sp-application-crypto = { version = "6.0.0", path = "../../primitives/application-crypto" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 54ff6a5b164a8..19be6715ffe3f 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -512,8 +512,8 @@ impl KeystoreInner { /// Returns `None` if the keystore only exists in-memory and there isn't any path to provide. 
fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { let mut buf = self.path.as_ref()?.clone(); - let key_type = array_bytes::bytes2hex("", &key_type.0); - let key = array_bytes::bytes2hex("", public); + let key_type = hex::encode(key_type.0); + let key = hex::encode(public); buf.push(key_type + key.as_str()); Some(buf) } @@ -534,7 +534,7 @@ impl KeystoreInner { // skip directories and non-unicode file names (hex is unicode) if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - match array_bytes::hex2bytes(name) { + match hex::decode(name) { Ok(ref hex) if hex.len() > 4 => { if hex[0..4] != id.0 { continue @@ -739,7 +739,7 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); - let file_name = temp_dir.path().join(array_bytes::bytes2hex("", &SR25519.0[..2])); + let file_name = temp_dir.path().join(hex::encode(&SR25519.0[..2])); fs::write(file_name, "test").expect("Invalid file is written"); assert!(SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty()); diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 95c281456396d..144c5ad168996 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,13 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = "0.7.6" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.49.0", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -lru = "0.8.1" +lru = "0.7.5" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } -sc-peerset = { version = "4.0.0-dev", path = "../peerset" } +sc-network = { version = "0.10.0-dev", path = "../network" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] diff 
--git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 5563b3be35e8d..2d086e89b4a10 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,8 +21,7 @@ use crate::{ Network, Validator, }; -use sc_network_common::protocol::{event::Event, ProtocolName}; -use sc_peerset::ReputationChange; +use sc_network::{Event, ReputationChange}; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -33,6 +32,7 @@ use log::trace; use prometheus_endpoint::Registry; use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, collections::{HashMap, VecDeque}, pin::Pin, sync::Arc, @@ -45,7 +45,7 @@ pub struct GossipEngine { state_machine: ConsensusGossip, network: Box + Send>, periodic_maintenance_interval: futures_timer::Delay, - protocol: ProtocolName, + protocol: Cow<'static, str>, /// Incoming events from the network. network_event_stream: Pin + Send>>, @@ -77,7 +77,7 @@ impl GossipEngine { /// Create a new instance. pub fn new + Send + Clone + 'static>( network: N, - protocol: impl Into, + protocol: impl Into>, validator: Arc>, metrics_registry: Option<&Registry>, ) -> Self @@ -85,7 +85,7 @@ impl GossipEngine { B: 'static, { let protocol = protocol.into(); - let network_event_stream = network.event_stream("network-gossip"); + let network_event_stream = network.event_stream(); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), @@ -151,7 +151,7 @@ impl GossipEngine { /// Send addressed message to the given peers. The message is not kept or multicast /// later on. - pub fn send_message(&mut self, who: Vec, data: Vec) { + pub fn send_message(&mut self, who: Vec, data: Vec) { for who in &who { self.state_machine.send_message(&mut *self.network, who, data.clone()); } @@ -162,7 +162,7 @@ impl GossipEngine { /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. 
pub fn announce(&self, block: B::Hash, associated_data: Option>) { - self.network.announce_block(block, associated_data); + self.network.announce(block, associated_data); } } @@ -181,10 +181,7 @@ impl Future for GossipEngine { this.network.add_set_reserved(remote, this.protocol.clone()); }, Event::SyncDisconnected { remote } => { - this.network.remove_peers_from_reserved_set( - this.protocol.clone(), - vec![remote], - ); + this.network.remove_set_reserved(remote, this.protocol.clone()); }, Event::NotificationStreamOpened { remote, protocol, role, .. } => { if protocol != this.protocol { @@ -307,7 +304,7 @@ impl futures::future::FusedFuture for GossipEngine { #[cfg(test)] mod tests { use super::*; - use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; + use crate::{ValidationResult, ValidatorContext}; use async_std::task::spawn; use futures::{ channel::mpsc::{unbounded, UnboundedSender}, @@ -315,20 +312,10 @@ mod tests { future::poll_fn, }; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use sc_network_common::{ - config::MultiaddrWithPeerId, - protocol::role::ObservedRole, - service::{ - NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, - NotificationSender, NotificationSenderError, - }, - }; - use sp_runtime::{ - testing::H256, - traits::{Block as BlockT, NumberFor}, - }; + use sc_network::ObservedRole; + use sp_runtime::{testing::H256, traits::Block as BlockT}; use std::{ - collections::HashSet, + borrow::Cow, sync::{Arc, Mutex}, }; use substrate_test_runtime_client::runtime::Block; @@ -343,109 +330,29 @@ mod tests { event_senders: Vec>, } - impl NetworkPeers for TestNetwork { - fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!(); - } - - fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!(); - } - - fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { - unimplemented!(); - } - - fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) {} - - fn 
disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { - unimplemented!(); - } - - fn accept_unreserved_peers(&self) { - unimplemented!(); - } - - fn deny_unreserved_peers(&self) { - unimplemented!(); - } - - fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { - unimplemented!(); - } - - fn remove_reserved_peer(&self, _peer_id: PeerId) { - unimplemented!(); - } - - fn set_reserved_peers( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn add_peers_to_reserved_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} - - fn add_to_peers_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn sync_num_connected(&self) -> usize { - unimplemented!(); - } - } - - impl NetworkEventStream for TestNetwork { - fn event_stream(&self, _name: &'static str) -> Pin + Send>> { + impl Network for TestNetwork { + fn event_stream(&self) -> Pin + Send>> { let (tx, rx) = unbounded(); self.inner.lock().unwrap().event_senders.push(tx); Box::pin(rx) } - } - impl NetworkNotification for TestNetwork { - fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { - unimplemented!(); - } + fn report_peer(&self, _: PeerId, _: ReputationChange) {} - fn notification_sender( - &self, - _target: PeerId, - _protocol: ProtocolName, - ) -> Result, NotificationSenderError> { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - } - impl NetworkBlock<::Hash, NumberFor> for TestNetwork { - fn announce_block(&self, _hash: ::Hash, _data: Option>) { + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn remove_set_reserved(&self, _: 
PeerId, _: Cow<'static, str>) {} + + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); } - fn new_best_block_imported( - &self, - _hash: ::Hash, - _number: NumberFor, - ) { + fn announce(&self, _: B::Hash, _: Option>) { unimplemented!(); } } @@ -493,7 +400,7 @@ mod tests { #[test] fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() { let topic = H256::default(); - let protocol = ProtocolName::from("/my_protocol"); + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); @@ -509,7 +416,7 @@ mod tests { // Register the remote peer. event_sender .start_send(Event::NotificationStreamOpened { - remote: remote_peer, + remote: remote_peer.clone(), protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, @@ -521,7 +428,7 @@ mod tests { .iter() .cloned() .map(|m| Event::NotificationsReceived { - remote: remote_peer, + remote: remote_peer.clone(), messages: vec![(protocol.clone(), m.into())], }) .collect::>(); @@ -551,7 +458,10 @@ mod tests { for subscriber in subscribers.iter_mut() { assert_eq!( subscriber.next(), - Some(TopicNotification { message: message.clone(), sender: Some(remote_peer) }), + Some(TopicNotification { + message: message.clone(), + sender: Some(remote_peer.clone()), + }), ); } } @@ -610,7 +520,7 @@ mod tests { } fn prop(channels: Vec, notifications: Vec>) { - let protocol = ProtocolName::from("/my_protocol"); + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); @@ -647,7 +557,7 @@ mod tests { // Create channels. 
let (txs, mut rxs) = channels .iter() - .map(|ChannelLengthAndTopic { length, topic }| (*topic, channel(*length))) + .map(|ChannelLengthAndTopic { length, topic }| (topic.clone(), channel(*length))) .fold((vec![], vec![]), |mut acc, (topic, (tx, rx))| { acc.0.push((topic, tx)); acc.1.push((topic, rx)); @@ -669,7 +579,7 @@ mod tests { // Register the remote peer. event_sender .start_send(Event::NotificationStreamOpened { - remote: remote_peer, + remote: remote_peer.clone(), protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, @@ -696,7 +606,10 @@ mod tests { .collect(); event_sender - .start_send(Event::NotificationsReceived { remote: remote_peer, messages }) + .start_send(Event::NotificationsReceived { + remote: remote_peer.clone(), + messages, + }) .expect("Event stream is unbounded; qed."); } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 1c8fb8ba05ce7..4b83708702466 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -41,9 +41,9 @@ //! messages. //! //! The [`GossipEngine`] will automatically use [`Network::add_set_reserved`] and -//! [`NetworkPeers::remove_peers_from_reserved_set`] to maintain a set of peers equal to the set of -//! peers the node is syncing from. See the documentation of `sc-network` for more explanations -//! about the concepts of peer sets. +//! [`Network::remove_set_reserved`] to maintain a set of peers equal to the set of peers the +//! node is syncing from. See the documentation of `sc-network` for more explanations about the +//! concepts of peer sets. //! //! # What is a validator? //! 
@@ -67,36 +67,74 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, }; -use libp2p::{multiaddr, PeerId}; -use sc_network_common::{ - protocol::ProtocolName, - service::{NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers}, -}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::iter; +use futures::prelude::*; +use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; +use sp_runtime::traits::Block as BlockT; +use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; mod bridge; mod state_machine; mod validator; /// Abstraction over a network. -pub trait Network: - NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock> -{ - fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) { +pub trait Network { + /// Returns a stream of events representing what happens on the network. + fn event_stream(&self) -> Pin + Send>>; + + /// Adjust the reputation of a node. + fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); + + /// Adds the peer to the set of peers to be connected to with this protocol. + fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Removes the peer from the set of peers to be connected to with this protocol. + fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Force-disconnect a peer. + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Send a notification to a peer. + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); + + /// Notify everyone we're connected to that we have the given block. + /// + /// Note: this method isn't strictly related to gossiping and should eventually be moved + /// somewhere else. 
+ fn announce(&self, block: B::Hash, associated_data: Option>); +} + +impl Network for Arc> { + fn event_stream(&self) -> Pin + Send>> { + Box::pin(NetworkService::event_stream(self, "network-gossip")) + } + + fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { + NetworkService::report_peer(self, peer_id, reputation); + } + + fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { let addr = iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); - let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); + let result = + NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } -} -impl Network for T where - T: NetworkPeers - + NetworkEventStream - + NetworkNotification - + NetworkBlock> -{ + fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { + NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(who).collect()); + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + NetworkService::disconnect_peer(self, who, protocol) + } + + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { + NetworkService::write_notification(self, who, protocol, message) + } + + fn announce(&self, block: B::Hash, associated_data: Option>) { + NetworkService::announce_block(self, block, associated_data) + } } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 001f2c6136a00..8a016cbaab3da 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,9 +22,9 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::protocol::{role::ObservedRole, ProtocolName}; +use sc_network::ObservedRole; use 
sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use std::{collections::HashMap, iter, num::NonZeroUsize, sync::Arc, time, time::Instant}; +use std::{borrow::Cow, collections::HashMap, iter, sync::Arc, time, time::Instant}; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 // NOTE: The current value is adjusted based on largest production network deployment (Kusama) and @@ -42,9 +42,9 @@ const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_millis(750); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); mod rep { - use sc_peerset::ReputationChange as Rep; + use sc_network::ReputationChange as Rep; /// Reputation change when a peer sends us a gossip message that we didn't know about. - pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successful gossip"); + pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successfull gossip"); /// Reputation change when a peer sends us a gossip message that we already knew about. pub const DUPLICATE_GOSSIP: Rep = Rep::new(-(1 << 2), "Duplicate gossip"); } @@ -99,7 +99,7 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { fn propagate<'a, B: BlockT, I>( network: &mut dyn Network, - protocol: ProtocolName, + protocol: Cow<'static, str>, messages: I, intent: MessageIntent, peers: &mut HashMap>, @@ -155,7 +155,7 @@ pub struct ConsensusGossip { peers: HashMap>, messages: Vec>, known_messages: LruCache, - protocol: ProtocolName, + protocol: Cow<'static, str>, validator: Arc>, next_broadcast: Instant, metrics: Option, @@ -165,7 +165,7 @@ impl ConsensusGossip { /// Create a new instance using the given validator. 
pub fn new( validator: Arc>, - protocol: ProtocolName, + protocol: Cow<'static, str>, metrics_registry: Option<&Registry>, ) -> Self { let metrics = match metrics_registry.map(Metrics::register) { @@ -180,11 +180,7 @@ impl ConsensusGossip { ConsensusGossip { peers: HashMap::new(), messages: Default::default(), - known_messages: { - let cap = NonZeroUsize::new(KNOWN_MESSAGES_CACHE_SIZE) - .expect("cache capacity is not zero"); - LruCache::new(cap) - }, + known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), protocol, validator, next_broadcast: Instant::now() + REBROADCAST_INTERVAL, @@ -515,23 +511,11 @@ impl Metrics { #[cfg(test)] mod tests { use super::*; - use crate::multiaddr::Multiaddr; use futures::prelude::*; - use sc_network_common::{ - config::MultiaddrWithPeerId, - protocol::event::Event, - service::{ - NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, - NotificationSender, NotificationSenderError, - }, - }; - use sc_peerset::ReputationChange; - use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, H256}, - traits::NumberFor, - }; + use sc_network::{Event, ReputationChange}; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; use std::{ - collections::HashSet, + borrow::Cow, pin::Pin, sync::{Arc, Mutex}, }; @@ -585,108 +569,28 @@ mod tests { peer_reports: Vec<(PeerId, ReputationChange)>, } - impl NetworkPeers for NoOpNetwork { - fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!(); - } - - fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!(); - } - - fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { - unimplemented!(); - } - - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.inner.lock().unwrap().peer_reports.push((who, cost_benefit)); - } - - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { - unimplemented!(); - } - - fn accept_unreserved_peers(&self) { - unimplemented!(); - } - - fn 
deny_unreserved_peers(&self) { - unimplemented!(); - } - - fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { - unimplemented!(); - } - - fn remove_reserved_peer(&self, _peer_id: PeerId) { + impl Network for NoOpNetwork { + fn event_stream(&self) -> Pin + Send>> { unimplemented!(); } - fn set_reserved_peers( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn add_peers_to_reserved_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} - - fn add_to_peers_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); + fn report_peer(&self, peer_id: PeerId, reputation_change: ReputationChange) { + self.inner.lock().unwrap().peer_reports.push((peer_id, reputation_change)); } - fn sync_num_connected(&self) -> usize { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - } - impl NetworkEventStream for NoOpNetwork { - fn event_stream(&self, _name: &'static str) -> Pin + Send>> { - unimplemented!(); - } - } + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - impl NetworkNotification for NoOpNetwork { - fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { - unimplemented!(); - } + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn notification_sender( - &self, - _target: PeerId, - _protocol: ProtocolName, - ) -> Result, NotificationSenderError> { - unimplemented!(); - } - } - - impl NetworkBlock<::Hash, NumberFor> for NoOpNetwork { - fn announce_block(&self, _hash: ::Hash, _data: Option>) { + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); } - fn 
new_best_block_imported( - &self, - _hash: ::Hash, - _number: NumberFor, - ) { + fn announce(&self, _: B::Hash, _: Option>) { unimplemented!(); } } @@ -804,7 +708,7 @@ mod tests { .on_incoming( &mut network, // Unregistered peer. - remote, + remote.clone(), vec![vec![1, 2, 3]], ); diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 77dcc3bdc8791..7d60f7b31397f 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -16,8 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use libp2p::PeerId; -use sc_network_common::protocol::role::ObservedRole; +use sc_network::{ObservedRole, PeerId}; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 4637a2a5105e5..2742262b57e40 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -13,8 +13,10 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[build-dependencies] +prost-build = "0.10" + [dependencies] -array-bytes = "4.1" async-trait = "0.1" asynchronous-codec = "0.6" bitflags = "1.3.2" @@ -25,21 +27,23 @@ either = "1.5.3" fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" +hex = "0.4.0" ip_network = "0.4.1" -libp2p = { version = "0.49.0", features = ["async-std", "dns", "identify", "kad", "mdns-async-io", "mplex", "noise", "ping", "tcp", "yamux", "websocket"] } +libp2p = "0.46.1" linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" -lru = "0.8.1" -parking_lot = "0.12.1" -pin-project = "1.0.12" -prost = "0.11" +lru = "0.7.5" +parking_lot = "0.12.0" +pin-project = "1.0.10" +prost = "0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.79" smallvec = "1.8.0" thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", 
"asynchronous_codec"] } +void = "1.0.2" zeroize = "1.4.3" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } @@ -57,7 +61,7 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3" -async-std = { version = "1.11.0", features = ["attributes"] } +async-std = "1.11.0" rand = "0.7.2" tempfile = "3.1.0" sc-network-light = { version = "0.10.0-dev", path = "./light" } diff --git a/client/network/bitswap/Cargo.toml b/client/network/bitswap/Cargo.toml deleted file mode 100644 index f60e21b4429fb..0000000000000 --- a/client/network/bitswap/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -description = "Substrate bitswap protocol" -name = "sc-network-bitswap" -version = "0.10.0-dev" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -documentation = "https://docs.rs/sc-network-bitswap" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[build-dependencies] -prost-build = "0.11" - -[dependencies] -cid = "0.8.6" -futures = "0.3.21" -libp2p = "0.49.0" -log = "0.4.17" -prost = "0.11" -thiserror = "1.0" -unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } -void = "1.0.2" -sc-client-api = { version = "4.0.0-dev", path = "../../api" } -sc-network-common = { version = "0.10.0-dev", path = "../common" } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } - -[dev-dependencies] -tokio = { version = "1", features = ["full"] } -sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } -sc-consensus = { version = "0.10.0-dev", path = 
"../../consensus/common" } -sp-core = { version = "6.0.0", path = "../../../primitives/core" } -sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs deleted file mode 100644 index 62a18b18c839d..0000000000000 --- a/client/network/bitswap/src/lib.rs +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Bitswap server for Substrate. -//! -//! Allows querying transactions by hash over standard bitswap protocol -//! Only supports bitswap 1.2.0. -//! CID is expected to reference 256-bit Blake2b transaction hash. 
- -use cid::{self, Version}; -use futures::{channel::mpsc, StreamExt}; -use libp2p::core::PeerId; -use log::{debug, error, trace}; -use prost::Message; -use sc_client_api::BlockBackend; -use sc_network_common::{ - protocol::ProtocolName, - request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, -}; -use schema::bitswap::{ - message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType}, - Message as BitswapMessage, -}; -use sp_runtime::traits::Block as BlockT; -use std::{io, sync::Arc, time::Duration}; -use unsigned_varint::encode as varint_encode; - -mod schema; - -const LOG_TARGET: &str = "bitswap"; - -// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes -// https://github.com/ipfs/js-ipfs-bitswap/blob/ -// d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16 -// We set it to the same value as max substrate protocol message -const MAX_PACKET_SIZE: u64 = 16 * 1024 * 1024; - -/// Max number of queued responses before denying requests. -const MAX_REQUEST_QUEUE: usize = 20; - -/// Max number of blocks per wantlist -const MAX_WANTED_BLOCKS: usize = 16; - -/// Bitswap protocol name -const PROTOCOL_NAME: &'static str = "/ipfs/bitswap/1.2.0"; - -/// Prefix represents all metadata of a CID, without the actual content. -#[derive(PartialEq, Eq, Clone, Debug)] -struct Prefix { - /// The version of CID. - pub version: Version, - /// The codec of CID. - pub codec: u64, - /// The multihash type of CID. - pub mh_type: u64, - /// The multihash length of CID. - pub mh_len: u8, -} - -impl Prefix { - /// Convert the prefix to encoded bytes. 
- pub fn to_bytes(&self) -> Vec { - let mut res = Vec::with_capacity(4); - let mut buf = varint_encode::u64_buffer(); - let version = varint_encode::u64(self.version.into(), &mut buf); - res.extend_from_slice(version); - let mut buf = varint_encode::u64_buffer(); - let codec = varint_encode::u64(self.codec, &mut buf); - res.extend_from_slice(codec); - let mut buf = varint_encode::u64_buffer(); - let mh_type = varint_encode::u64(self.mh_type, &mut buf); - res.extend_from_slice(mh_type); - let mut buf = varint_encode::u64_buffer(); - let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf); - res.extend_from_slice(mh_len); - res - } -} - -/// Bitswap request handler -pub struct BitswapRequestHandler { - client: Arc + Send + Sync>, - request_receiver: mpsc::Receiver, -} - -impl BitswapRequestHandler { - /// Create a new [`BitswapRequestHandler`]. - pub fn new(client: Arc + Send + Sync>) -> (Self, ProtocolConfig) { - let (tx, request_receiver) = mpsc::channel(MAX_REQUEST_QUEUE); - - let config = ProtocolConfig { - name: ProtocolName::from(PROTOCOL_NAME), - fallback_names: vec![], - max_request_size: MAX_PACKET_SIZE, - max_response_size: MAX_PACKET_SIZE, - request_timeout: Duration::from_secs(15), - inbound_queue: Some(tx), - }; - - (Self { client, request_receiver }, config) - } - - /// Run [`BitswapRequestHandler`]. 
- pub async fn run(mut self) { - while let Some(request) = self.request_receiver.next().await { - let IncomingRequest { peer, payload, pending_response } = request; - - match self.handle_message(&peer, &payload) { - Ok(response) => { - let response = OutgoingResponse { - result: Ok(response), - reputation_changes: Vec::new(), - sent_feedback: None, - }; - - match pending_response.send(response) { - Ok(()) => - trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",), - Err(_) => debug!( - target: LOG_TARGET, - "Failed to handle light client request from {peer}: {}", - BitswapError::SendResponse, - ), - } - }, - Err(err) => { - error!(target: LOG_TARGET, "Failed to process request from {peer}: {err}"); - - // TODO: adjust reputation? - - let response = OutgoingResponse { - result: Err(()), - reputation_changes: vec![], - sent_feedback: None, - }; - - if pending_response.send(response).is_err() { - debug!( - target: LOG_TARGET, - "Failed to handle bitswap request from {peer}: {}", - BitswapError::SendResponse, - ); - } - }, - } - } - } - - /// Handle received Bitswap request - fn handle_message( - &mut self, - peer: &PeerId, - payload: &Vec, - ) -> Result, BitswapError> { - let request = schema::bitswap::Message::decode(&payload[..])?; - - trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); - - let mut response = BitswapMessage::default(); - - let wantlist = match request.wantlist { - Some(wantlist) => wantlist, - None => { - debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); - return Err(BitswapError::InvalidWantList) - }, - }; - - if wantlist.entries.len() > MAX_WANTED_BLOCKS { - trace!(target: LOG_TARGET, "Ignored request: too many entries"); - return Err(BitswapError::TooManyEntries) - } - - for entry in wantlist.entries { - let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { - Ok(cid) => cid, - Err(e) => { - trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); - continue - }, - }; - 
- if cid.version() != cid::Version::V1 || - cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || - cid.hash().size() != 32 - { - debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); - continue - } - - let mut hash = B::Hash::default(); - hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); - let transaction = match self.client.indexed_transaction(hash) { - Ok(ex) => ex, - Err(e) => { - error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); - None - }, - }; - - match transaction { - Some(transaction) => { - trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); - - if entry.want_type == WantType::Block as i32 { - let prefix = Prefix { - version: cid.version(), - codec: cid.codec(), - mh_type: cid.hash().code(), - mh_len: cid.hash().size(), - }; - response - .payload - .push(MessageBlock { prefix: prefix.to_bytes(), data: transaction }); - } else { - response.block_presences.push(BlockPresence { - r#type: BlockPresenceType::Have as i32, - cid: cid.to_bytes(), - }); - } - }, - None => { - trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash); - - if entry.send_dont_have { - response.block_presences.push(BlockPresence { - r#type: BlockPresenceType::DontHave as i32, - cid: cid.to_bytes(), - }); - } - }, - } - } - - Ok(response.encode_to_vec()) - } -} - -/// Bitswap protocol error. -#[derive(Debug, thiserror::Error)] -pub enum BitswapError { - /// Protobuf decoding error. - #[error("Failed to decode request: {0}.")] - DecodeProto(#[from] prost::DecodeError), - - /// Protobuf encoding error. - #[error("Failed to encode response: {0}.")] - EncodeProto(#[from] prost::EncodeError), - - /// Client backend error. - #[error(transparent)] - Client(#[from] sp_blockchain::Error), - - /// Error parsing CID - #[error(transparent)] - BadCid(#[from] cid::Error), - - /// Packet read error. - #[error(transparent)] - Read(#[from] io::Error), - - /// Error sending response. 
- #[error("Failed to send response.")] - SendResponse, - - /// Message doesn't have a WANT list. - #[error("Invalid WANT list.")] - InvalidWantList, - - /// Too many blocks requested. - #[error("Too many block entries in the request.")] - TooManyEntries, -} - -#[cfg(test)] -mod tests { - use super::*; - use futures::{channel::oneshot, SinkExt}; - use sc_block_builder::BlockBuilderProvider; - use schema::bitswap::{ - message::{wantlist::Entry, Wantlist}, - Message as BitswapMessage, - }; - use sp_consensus::BlockOrigin; - use sp_runtime::codec::Encode; - use substrate_test_runtime::Extrinsic; - use substrate_test_runtime_client::{self, prelude::*, TestClientBuilder}; - - #[tokio::test] - async fn undecodeable_message() { - let client = substrate_test_runtime_client::new(); - let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); - - tokio::spawn(async move { bitswap.run().await }); - - let (tx, rx) = oneshot::channel(); - config - .inbound_queue - .unwrap() - .send(IncomingRequest { - peer: PeerId::random(), - payload: vec![0x13, 0x37, 0x13, 0x38], - pending_response: tx, - }) - .await - .unwrap(); - - if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { - assert_eq!(result, Err(())); - assert_eq!(reputation_changes, Vec::new()); - assert!(sent_feedback.is_none()); - } else { - panic!("invalid event received"); - } - } - - #[tokio::test] - async fn empty_want_list() { - let client = substrate_test_runtime_client::new(); - let (bitswap, mut config) = BitswapRequestHandler::new(Arc::new(client)); - - tokio::spawn(async move { bitswap.run().await }); - - let (tx, rx) = oneshot::channel(); - config - .inbound_queue - .as_mut() - .unwrap() - .send(IncomingRequest { - peer: PeerId::random(), - payload: BitswapMessage { wantlist: None, ..Default::default() }.encode_to_vec(), - pending_response: tx, - }) - .await - .unwrap(); - - if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { - 
assert_eq!(result, Err(())); - assert_eq!(reputation_changes, Vec::new()); - assert!(sent_feedback.is_none()); - } else { - panic!("invalid event received"); - } - - // Empty WANT list should not cause an error - let (tx, rx) = oneshot::channel(); - config - .inbound_queue - .unwrap() - .send(IncomingRequest { - peer: PeerId::random(), - payload: BitswapMessage { - wantlist: Some(Default::default()), - ..Default::default() - } - .encode_to_vec(), - pending_response: tx, - }) - .await - .unwrap(); - - if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { - assert_eq!(result, Ok(BitswapMessage::default().encode_to_vec())); - assert_eq!(reputation_changes, Vec::new()); - assert!(sent_feedback.is_none()); - } else { - panic!("invalid event received"); - } - } - - #[tokio::test] - async fn too_long_want_list() { - let client = substrate_test_runtime_client::new(); - let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); - - tokio::spawn(async move { bitswap.run().await }); - - let (tx, rx) = oneshot::channel(); - config - .inbound_queue - .unwrap() - .send(IncomingRequest { - peer: PeerId::random(), - payload: BitswapMessage { - wantlist: Some(Wantlist { - entries: (0..MAX_WANTED_BLOCKS + 1) - .map(|_| Entry::default()) - .collect::>(), - full: false, - }), - ..Default::default() - } - .encode_to_vec(), - pending_response: tx, - }) - .await - .unwrap(); - - if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { - assert_eq!(result, Err(())); - assert_eq!(reputation_changes, Vec::new()); - assert!(sent_feedback.is_none()); - } else { - panic!("invalid event received"); - } - } - - #[tokio::test] - async fn transaction_not_found() { - let client = TestClientBuilder::with_tx_storage(u32::MAX).build(); - - let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); - tokio::spawn(async move { bitswap.run().await }); - - let (tx, rx) = oneshot::channel(); - config - .inbound_queue - 
.unwrap() - .send(IncomingRequest { - peer: PeerId::random(), - payload: BitswapMessage { - wantlist: Some(Wantlist { - entries: vec![Entry { - block: cid::Cid::new_v1( - 0x70, - cid::multihash::Multihash::wrap( - u64::from(cid::multihash::Code::Blake2b256), - &[0u8; 32], - ) - .unwrap(), - ) - .to_bytes(), - ..Default::default() - }], - full: false, - }), - ..Default::default() - } - .encode_to_vec(), - pending_response: tx, - }) - .await - .unwrap(); - - if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { - assert_eq!(result, Ok(vec![])); - assert_eq!(reputation_changes, Vec::new()); - assert!(sent_feedback.is_none()); - } else { - panic!("invalid event received"); - } - } - - #[tokio::test] - async fn transaction_found() { - let mut client = TestClientBuilder::with_tx_storage(u32::MAX).build(); - let mut block_builder = client.new_block(Default::default()).unwrap(); - - let ext = Extrinsic::Store(vec![0x13, 0x37, 0x13, 0x38]); - - block_builder.push(ext.clone()).unwrap(); - let block = block_builder.build().unwrap().block; - - client.import(BlockOrigin::File, block).await.unwrap(); - - let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); - - tokio::spawn(async move { bitswap.run().await }); - - let (tx, rx) = oneshot::channel(); - config - .inbound_queue - .unwrap() - .send(IncomingRequest { - peer: PeerId::random(), - payload: BitswapMessage { - wantlist: Some(Wantlist { - entries: vec![Entry { - block: cid::Cid::new_v1( - 0x70, - cid::multihash::Multihash::wrap( - u64::from(cid::multihash::Code::Blake2b256), - &sp_core::hashing::blake2_256(&ext.encode()[2..]), - ) - .unwrap(), - ) - .to_bytes(), - ..Default::default() - }], - full: false, - }), - ..Default::default() - } - .encode_to_vec(), - pending_response: tx, - }) - .await - .unwrap(); - - if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { - assert_eq!(reputation_changes, Vec::new()); - 
assert!(sent_feedback.is_none()); - - let response = - schema::bitswap::Message::decode(&result.expect("fetch to succeed")[..]).unwrap(); - assert_eq!(response.payload[0].data, vec![0x13, 0x37, 0x13, 0x38]); - } else { - panic!("invalid event received"); - } - } -} diff --git a/client/network/bitswap/build.rs b/client/network/build.rs similarity index 100% rename from client/network/bitswap/build.rs rename to client/network/build.rs diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 48d83a59c742b..b0e3a8fe42a83 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -14,26 +14,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.11" +prost-build = "0.10" [dependencies] -async-trait = "0.1.57" bitflags = "1.3.2" -bytes = "1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -futures-timer = "3.0.2" -libp2p = { version = "0.49.0", features = [ "request-response", "kad" ] } -linked_hash_set = "0.1.3" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +libp2p = "0.46.1" smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -serde = { version = "1.0.136", features = ["derive"] } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -thiserror = "1.0" diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 96c7c11ec2696..92f8df5cd380f 100644 --- a/client/network/common/src/config.rs +++ 
b/client/network/common/src/config.rs @@ -18,15 +18,9 @@ //! Configuration of the networking layer. -use crate::protocol; +use std::{fmt, str}; -use codec::Encode; -use libp2p::{multiaddr, Multiaddr, PeerId}; -use std::{fmt, str, str::FromStr}; - -/// Protocol name prefix, transmitted on the wire for legacy protocol names. -/// I.e., `dot` in `/dot/sync/2`. Should be unique for each chain. Always UTF-8. -/// Deprecated in favour of genesis hash & fork ID based protocol names. +/// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. #[derive(Clone, PartialEq, Eq, Hash)] pub struct ProtocolId(smallvec::SmallVec<[u8; 6]>); @@ -48,286 +42,3 @@ impl fmt::Debug for ProtocolId { fmt::Debug::fmt(self.as_ref(), f) } } - -/// Parses a string address and splits it into Multiaddress and PeerId, if -/// valid. -/// -/// # Example -/// -/// ``` -/// # use libp2p::{Multiaddr, PeerId}; -/// # use sc_network_common::config::parse_str_addr; -/// let (peer_id, addr) = parse_str_addr( -/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" -/// ).unwrap(); -/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); -/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); -/// ``` -pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { - let addr: Multiaddr = addr_str.parse()?; - parse_addr(addr) -} - -/// Splits a Multiaddress into a Multiaddress and PeerId. -pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { - let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, - _ => return Err(ParseErr::PeerIdMissing), - }; - - Ok((who, addr)) -} - -/// Address of a node, including its identity. -/// -/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. 
-/// -/// # Example -/// -/// ``` -/// # use libp2p::{Multiaddr, PeerId}; -/// # use sc_network_common::config::MultiaddrWithPeerId; -/// let addr: MultiaddrWithPeerId = -/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); -/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); -/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); -/// ``` -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] -#[serde(try_from = "String", into = "String")] -pub struct MultiaddrWithPeerId { - /// Address of the node. - pub multiaddr: Multiaddr, - /// Its identity. - pub peer_id: PeerId, -} - -impl MultiaddrWithPeerId { - /// Concatenates the multiaddress and peer ID into one multiaddress containing both. - pub fn concat(&self) -> Multiaddr { - let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); - self.multiaddr.clone().with(proto) - } -} - -impl fmt::Display for MultiaddrWithPeerId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.concat(), f) - } -} - -impl FromStr for MultiaddrWithPeerId { - type Err = ParseErr; - - fn from_str(s: &str) -> Result { - let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(Self { peer_id, multiaddr }) - } -} - -impl From for String { - fn from(ma: MultiaddrWithPeerId) -> String { - format!("{}", ma) - } -} - -impl TryFrom for MultiaddrWithPeerId { - type Error = ParseErr; - fn try_from(string: String) -> Result { - string.parse() - } -} - -/// Error that can be generated by `parse_str_addr`. -#[derive(Debug)] -pub enum ParseErr { - /// Error while parsing the multiaddress. - MultiaddrParse(multiaddr::Error), - /// Multihash of the peer ID is invalid. - InvalidPeerId, - /// The peer ID is missing from the address. 
- PeerIdMissing, -} - -impl fmt::Display for ParseErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::MultiaddrParse(err) => write!(f, "{}", err), - Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), - Self::PeerIdMissing => write!(f, "Peer id is missing from the address"), - } - } -} - -impl std::error::Error for ParseErr { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::MultiaddrParse(err) => Some(err), - Self::InvalidPeerId => None, - Self::PeerIdMissing => None, - } - } -} - -impl From for ParseErr { - fn from(err: multiaddr::Error) -> ParseErr { - Self::MultiaddrParse(err) - } -} - -/// Configuration for a set of nodes. -#[derive(Clone, Debug)] -pub struct SetConfig { - /// Maximum allowed number of incoming substreams related to this set. - pub in_peers: u32, - /// Number of outgoing substreams related to this set that we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically - /// refused. 
- pub non_reserved_mode: NonReservedPeerMode, -} - -impl Default for SetConfig { - fn default() -> Self { - Self { - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - } - } -} - -/// Custom handshake for the notification protocol -#[derive(Debug, Clone)] -pub struct NotificationHandshake(Vec); - -impl NotificationHandshake { - /// Create new `NotificationHandshake` from an object that implements `Encode` - pub fn new(handshake: H) -> Self { - Self(handshake.encode()) - } - - /// Create new `NotificationHandshake` from raw bytes - pub fn from_bytes(bytes: Vec) -> Self { - Self(bytes) - } -} - -impl std::ops::Deref for NotificationHandshake { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// Extension to [`SetConfig`] for sets that aren't the default set. -/// -/// > **Note**: As new fields might be added in the future, please consider using the `new` method -/// > and modifiers instead of creating this struct manually. -#[derive(Clone, Debug)] -pub struct NonDefaultSetConfig { - /// Name of the notifications protocols of this set. A substream on this set will be - /// considered established once this protocol is open. - /// - /// > **Note**: This field isn't present for the default set, as this is handled internally - /// > by the networking code. - pub notifications_protocol: protocol::ProtocolName, - /// If the remote reports that it doesn't support the protocol indicated in the - /// `notifications_protocol` field, then each of these fallback names will be tried one by - /// one. - /// - /// If a fallback is used, it will be reported in - /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` - pub fallback_names: Vec, - /// Handshake of the protocol - /// - /// NOTE: Currently custom handshakes are not fully supported. See issue #5685 for more - /// details. 
This field is temporarily used to allow moving the hardcoded block announcement - /// protocol out of `protocol.rs`. - pub handshake: Option, - /// Maximum allowed size of single notifications. - pub max_notification_size: u64, - /// Base configuration. - pub set_config: SetConfig, -} - -impl NonDefaultSetConfig { - /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: protocol::ProtocolName, max_notification_size: u64) -> Self { - Self { - notifications_protocol, - max_notification_size, - fallback_names: Vec::new(), - handshake: None, - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } - - /// Modifies the configuration to allow non-reserved nodes. - pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { - self.set_config.in_peers = in_peers; - self.set_config.out_peers = out_peers; - self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; - } - - /// Add a node to the list of reserved nodes. - pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { - self.set_config.reserved_nodes.push(peer); - } - - /// Add a list of protocol names used for backward compatibility. - /// - /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. - pub fn add_fallback_names(&mut self, fallback_names: Vec) { - self.fallback_names.extend(fallback_names); - } -} - -/// Configuration for the transport layer. -#[derive(Clone, Debug)] -pub enum TransportConfig { - /// Normal transport mode. - Normal { - /// If true, the network will use mDNS to discover other libp2p nodes on the local network - /// and connect to them if they support the same chain. - enable_mdns: bool, - - /// If true, allow connecting to private IPv4 addresses (as defined in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). 
Irrelevant for addresses that have - /// been passed in `::sc_network::config::NetworkConfiguration::boot_nodes`. - allow_private_ipv4: bool, - }, - - /// Only allow connections within the same process. - /// Only addresses of the form `/memory/...` will be supported. - MemoryOnly, -} - -/// The policy for connections to non-reserved peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, -} - -impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(Self::Accept), - "deny" => Some(Self::Deny), - _ => None, - } - } -} diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 36e67f11e5cff..9fbedc542c123 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -19,16 +19,6 @@ //! Common data structures of the networking layer. pub mod config; -pub mod error; pub mod message; -pub mod protocol; pub mod request_responses; -pub mod service; pub mod sync; -pub mod utils; - -/// Minimum Requirements for a Hash within Networking -pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} - -impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static -{} diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs deleted file mode 100644 index 04bfaedbcac71..0000000000000 --- a/client/network/common/src/protocol.rs +++ /dev/null @@ -1,147 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::{ - borrow::Borrow, - fmt, - hash::{Hash, Hasher}, - ops::Deref, - sync::Arc, -}; - -use libp2p::core::upgrade; - -pub mod event; -pub mod role; - -/// The protocol name transmitted on the wire. -#[derive(Debug, Clone)] -pub enum ProtocolName { - /// The protocol name as a static string. - Static(&'static str), - /// The protocol name as a dynamically allocated string. 
- OnHeap(Arc), -} - -impl From<&'static str> for ProtocolName { - fn from(name: &'static str) -> Self { - Self::Static(name) - } -} - -impl From> for ProtocolName { - fn from(name: Arc) -> Self { - Self::OnHeap(name) - } -} - -impl From for ProtocolName { - fn from(name: String) -> Self { - Self::OnHeap(Arc::from(name)) - } -} - -impl Deref for ProtocolName { - type Target = str; - - fn deref(&self) -> &str { - match self { - Self::Static(name) => name, - Self::OnHeap(name) => &name, - } - } -} - -impl Borrow for ProtocolName { - fn borrow(&self) -> &str { - self - } -} - -impl PartialEq for ProtocolName { - fn eq(&self, other: &Self) -> bool { - (self as &str) == (other as &str) - } -} - -impl Eq for ProtocolName {} - -impl Hash for ProtocolName { - fn hash(&self, state: &mut H) { - (self as &str).hash(state) - } -} - -impl fmt::Display for ProtocolName { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self) - } -} - -impl upgrade::ProtocolName for ProtocolName { - fn protocol_name(&self) -> &[u8] { - (self as &str).as_bytes() - } -} - -#[cfg(test)] -mod tests { - use super::ProtocolName; - use std::{ - borrow::Borrow, - collections::hash_map::DefaultHasher, - hash::{Hash, Hasher}, - }; - - #[test] - fn protocol_name_keys_are_equivalent_to_str_keys() { - const PROTOCOL: &'static str = "/some/protocol/1"; - let static_protocol_name = ProtocolName::from(PROTOCOL); - let on_heap_protocol_name = ProtocolName::from(String::from(PROTOCOL)); - - assert_eq!(>::borrow(&static_protocol_name), PROTOCOL); - assert_eq!(>::borrow(&on_heap_protocol_name), PROTOCOL); - assert_eq!(static_protocol_name, on_heap_protocol_name); - - assert_eq!(hash(static_protocol_name), hash(PROTOCOL)); - assert_eq!(hash(on_heap_protocol_name), hash(PROTOCOL)); - } - - #[test] - fn different_protocol_names_do_not_compare_equal() { - const PROTOCOL1: &'static str = "/some/protocol/1"; - let static_protocol_name1 = ProtocolName::from(PROTOCOL1); - let on_heap_protocol_name1 = 
ProtocolName::from(String::from(PROTOCOL1)); - - const PROTOCOL2: &'static str = "/some/protocol/2"; - let static_protocol_name2 = ProtocolName::from(PROTOCOL2); - let on_heap_protocol_name2 = ProtocolName::from(String::from(PROTOCOL2)); - - assert_ne!(>::borrow(&static_protocol_name1), PROTOCOL2); - assert_ne!(>::borrow(&on_heap_protocol_name1), PROTOCOL2); - assert_ne!(static_protocol_name1, static_protocol_name2); - assert_ne!(static_protocol_name1, on_heap_protocol_name2); - assert_ne!(on_heap_protocol_name1, on_heap_protocol_name2); - } - - fn hash(x: T) -> u64 { - let mut hasher = DefaultHasher::new(); - x.hash(&mut hasher); - hasher.finish() - } -} diff --git a/client/network/common/src/protocol/role.rs b/client/network/common/src/protocol/role.rs deleted file mode 100644 index ed22830fd7170..0000000000000 --- a/client/network/common/src/protocol/role.rs +++ /dev/null @@ -1,121 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use codec::{self, Encode, EncodeLike, Input, Output}; - -/// Role that the peer sent to us during the handshake, with the addition of what our local node -/// knows about that peer. -/// -/// > **Note**: This enum is different from the `Role` enum. 
The `Role` enum indicates what a -/// > node says about itself, while `ObservedRole` is a `Role` merged with the -/// > information known locally about that node. -#[derive(Debug, Clone)] -pub enum ObservedRole { - /// Full node. - Full, - /// Light node. - Light, - /// Third-party authority. - Authority, -} - -impl ObservedRole { - /// Returns `true` for `ObservedRole::Light`. - pub fn is_light(&self) -> bool { - matches!(self, Self::Light) - } -} - -/// Role of the local node. -#[derive(Debug, Clone)] -pub enum Role { - /// Regular full node. - Full, - /// Actual authority. - Authority, -} - -impl Role { - /// True for [`Role::Authority`]. - pub fn is_authority(&self) -> bool { - matches!(self, Self::Authority) - } -} - -impl std::fmt::Display for Role { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Full => write!(f, "FULL"), - Self::Authority => write!(f, "AUTHORITY"), - } - } -} - -bitflags::bitflags! { - /// Bitmask of the roles that a node fulfills. - pub struct Roles: u8 { - /// No network. - const NONE = 0b00000000; - /// Full node, does not participate in consensus. - const FULL = 0b00000001; - /// Light client node. - const LIGHT = 0b00000010; - /// Act as an authority - const AUTHORITY = 0b00000100; - } -} - -impl Roles { - /// Does this role represents a client that holds full chain data locally? - pub fn is_full(&self) -> bool { - self.intersects(Self::FULL | Self::AUTHORITY) - } - - /// Does this role represents a client that does not participates in the consensus? - pub fn is_authority(&self) -> bool { - *self == Self::AUTHORITY - } - - /// Does this role represents a client that does not hold full chain data locally? 
- pub fn is_light(&self) -> bool { - !self.is_full() - } -} - -impl<'a> From<&'a Role> for Roles { - fn from(roles: &'a Role) -> Self { - match roles { - Role::Full => Self::FULL, - Role::Authority => Self::AUTHORITY, - } - } -} - -impl Encode for Roles { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } -} - -impl EncodeLike for Roles {} - -impl codec::Decode for Roles { - fn decode(input: &mut I) -> Result { - Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) - } -} diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index 1a8d48e11be53..71570e6beb864 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -18,20 +18,16 @@ //! Collection of generic data structures for request-response protocols. -use crate::protocol::ProtocolName; use futures::channel::{mpsc, oneshot}; -use libp2p::{request_response::OutboundFailure, PeerId}; +use libp2p::PeerId; use sc_peerset::ReputationChange; -use std::time::Duration; +use std::{borrow::Cow, time::Duration}; /// Configuration for a single request-response protocol. #[derive(Debug, Clone)] pub struct ProtocolConfig { /// Name of the protocol on the wire. Should be something like `/foo/bar`. - pub name: ProtocolName, - - /// Fallback on the wire protocol names to support. - pub fallback_names: Vec, + pub name: Cow<'static, str>, /// Maximum allowed size, in bytes, of a request. /// @@ -116,40 +112,3 @@ pub struct OutgoingResponse { /// > written to the buffer managed by the operating system. pub sent_feedback: Option>, } - -/// When sending a request, what to do on a disconnected recipient. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum IfDisconnected { - /// Try to connect to the peer. - TryConnect, - /// Just fail if the destination is not yet connected. - ImmediateError, -} - -/// Convenience functions for `IfDisconnected`. 
-impl IfDisconnected { - /// Shall we connect to a disconnected peer? - pub fn should_connect(self) -> bool { - match self { - Self::TryConnect => true, - Self::ImmediateError => false, - } - } -} - -/// Error in a request. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum RequestFailure { - #[error("We are not currently connected to the requested peer.")] - NotConnected, - #[error("Given protocol hasn't been registered.")] - UnknownProtocol, - #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] - Refused, - #[error("The remote replied, but the local node is no longer interested in the response.")] - Obsolete, - /// Problem on the network. - #[error("Problem on the network: {0}")] - Network(OutboundFailure), -} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs deleted file mode 100644 index 54d254eac384f..0000000000000 --- a/client/network/common/src/service.rs +++ /dev/null @@ -1,631 +0,0 @@ -// This file is part of Substrate. -// -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -// -// If you read this, you are very thorough, congratulations. 
- -use crate::{ - config::MultiaddrWithPeerId, - protocol::{event::Event, ProtocolName}, - request_responses::{IfDisconnected, RequestFailure}, - sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, -}; -use futures::{channel::oneshot, Stream}; -pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; -use libp2p::{Multiaddr, PeerId}; -use sc_peerset::ReputationChange; -pub use signature::Signature; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc}; - -mod signature; - -/// Signer with network identity -pub trait NetworkSigner { - /// Signs the message with the `KeyPair` that defines the local [`PeerId`]. - fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result; -} - -impl NetworkSigner for Arc -where - T: ?Sized, - T: NetworkSigner, -{ - fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { - T::sign_with_local_identity(self, msg) - } -} - -/// Provides access to the networking DHT. -pub trait NetworkDHTProvider { - /// Start getting a value from the DHT. - fn get_value(&self, key: &KademliaKey); - - /// Start putting a value in the DHT. - fn put_value(&self, key: KademliaKey, value: Vec); -} - -impl NetworkDHTProvider for Arc -where - T: ?Sized, - T: NetworkDHTProvider, -{ - fn get_value(&self, key: &KademliaKey) { - T::get_value(self, key) - } - - fn put_value(&self, key: KademliaKey, value: Vec) { - T::put_value(self, key, value) - } -} - -/// Provides an ability to set a fork sync request for a particular block. -pub trait NetworkSyncForkRequest { - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). 
- fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber); -} - -impl NetworkSyncForkRequest for Arc -where - T: ?Sized, - T: NetworkSyncForkRequest, -{ - fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber) { - T::set_sync_fork_request(self, peers, hash, number) - } -} - -/// Overview status of the network. -#[derive(Clone)] -pub struct NetworkStatus { - /// Current global sync state. - pub sync_state: SyncState>, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_sync_peers: u32, - /// Total number of connected peers - pub num_connected_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, - /// The total number of bytes received. - pub total_bytes_inbound: u64, - /// The total number of bytes sent. - pub total_bytes_outbound: u64, - /// State sync in progress. - pub state_sync: Option, - /// Warp sync in progress. - pub warp_sync: Option>, -} - -/// Provides high-level status information about network. -#[async_trait::async_trait] -pub trait NetworkStatusProvider { - /// High-level network status information. - /// - /// Returns an error if the `NetworkWorker` is no longer running. - async fn status(&self) -> Result, ()>; -} - -// Manual implementation to avoid extra boxing here -impl NetworkStatusProvider for Arc -where - T: ?Sized, - T: NetworkStatusProvider, -{ - fn status<'life0, 'async_trait>( - &'life0 self, - ) -> Pin, ()>> + Send + 'async_trait>> - where - 'life0: 'async_trait, - Self: 'async_trait, - { - T::status(self) - } -} - -/// Provides low-level API for manipulating network peers. -pub trait NetworkPeers { - /// Set authorized peers. - /// - /// Need a better solution to manage authorized peers, but now just use reserved peers for - /// prototyping. - fn set_authorized_peers(&self, peers: HashSet); - - /// Set authorized_only flag. 
- /// - /// Need a better solution to decide authorized_only, but now just use reserved_only flag for - /// prototyping. - fn set_authorized_only(&self, reserved_only: bool); - - /// Adds an address known to a node. - fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); - - /// Report a given peer as either beneficial (+) or costly (-) according to the - /// given scalar. - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); - - /// Disconnect from a node as soon as possible. - /// - /// This triggers the same effects as if the connection had closed itself spontaneously. - /// - /// See also [`NetworkPeers::remove_from_peers_set`], which has the same effect but also - /// prevents the local node from re-establishing an outgoing substream to this peer until it - /// is added again. - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); - - /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. - fn accept_unreserved_peers(&self); - - /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing - /// purposes. - fn deny_unreserved_peers(&self); - - /// Adds a `PeerId` and its `Multiaddr` as reserved. - /// - /// Returns an `Err` if the given string is not a valid multiaddress - /// or contains an invalid peer ID (which includes the local peer ID). - fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>; - - /// Removes a `PeerId` from the list of reserved peers. - fn remove_reserved_peer(&self, peer_id: PeerId); - - /// Sets the reserved set of a protocol to the given set of peers. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// The node will start establishing/accepting connections and substreams to/from peers in this - /// set, if it doesn't have any substream open with them yet. 
- /// - /// Note however, if a call to this function results in less peers on the reserved set, they - /// will not necessarily get disconnected (depending on available free slots in the peer set). - /// If you want to also disconnect those removed peers, you will have to call - /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit - /// this step if the peer set is in reserved only mode. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn set_reserved_peers( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String>; - - /// Add peers to a peer set. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn add_peers_to_reserved_set( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String>; - - /// Remove peers from a peer set. - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec); - - /// Add a peer to a set of peers. - /// - /// If the set has slots available, it will try to open a substream with this peer. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn add_to_peers_set( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String>; - - /// Remove peers from a peer set. - /// - /// If we currently have an open substream with this peer, it will soon be closed. - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec); - - /// Returns the number of peers in the sync peer set we're connected to. 
- fn sync_num_connected(&self) -> usize; -} - -// Manual implementation to avoid extra boxing here -impl NetworkPeers for Arc -where - T: ?Sized, - T: NetworkPeers, -{ - fn set_authorized_peers(&self, peers: HashSet) { - T::set_authorized_peers(self, peers) - } - - fn set_authorized_only(&self, reserved_only: bool) { - T::set_authorized_only(self, reserved_only) - } - - fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { - T::add_known_address(self, peer_id, addr) - } - - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - T::report_peer(self, who, cost_benefit) - } - - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - T::disconnect_peer(self, who, protocol) - } - - fn accept_unreserved_peers(&self) { - T::accept_unreserved_peers(self) - } - - fn deny_unreserved_peers(&self) { - T::deny_unreserved_peers(self) - } - - fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { - T::add_reserved_peer(self, peer) - } - - fn remove_reserved_peer(&self, peer_id: PeerId) { - T::remove_reserved_peer(self, peer_id) - } - - fn set_reserved_peers( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String> { - T::set_reserved_peers(self, protocol, peers) - } - - fn add_peers_to_reserved_set( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String> { - T::add_peers_to_reserved_set(self, protocol, peers) - } - - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec) { - T::remove_peers_from_reserved_set(self, protocol, peers) - } - - fn add_to_peers_set( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String> { - T::add_to_peers_set(self, protocol, peers) - } - - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec) { - T::remove_from_peers_set(self, protocol, peers) - } - - fn sync_num_connected(&self) -> usize { - T::sync_num_connected(self) - } -} - -/// Provides access to network-level event stream. 
-pub trait NetworkEventStream { - /// Returns a stream containing the events that happen on the network. - /// - /// If this method is called multiple times, the events are duplicated. - /// - /// The stream never ends (unless the `NetworkWorker` gets shut down). - /// - /// The name passed is used to identify the channel in the Prometheus metrics. Note that the - /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having - /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory - fn event_stream(&self, name: &'static str) -> Pin + Send>>; -} - -impl NetworkEventStream for Arc -where - T: ?Sized, - T: NetworkEventStream, -{ - fn event_stream(&self, name: &'static str) -> Pin + Send>> { - T::event_stream(self, name) - } -} - -/// Trait for providing information about the local network state -pub trait NetworkStateInfo { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec; - - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId; -} - -impl NetworkStateInfo for Arc -where - T: ?Sized, - T: NetworkStateInfo, -{ - fn external_addresses(&self) -> Vec { - T::external_addresses(self) - } - - fn local_peer_id(&self) -> PeerId { - T::local_peer_id(self) - } -} - -/// Reserved slot in the notifications buffer, ready to accept data. -pub trait NotificationSenderReady { - /// Consumes this slots reservation and actually queues the notification. - /// - /// NOTE: Traits can't consume itself, but calling this method second time will return an error. - fn send(&mut self, notification: Vec) -> Result<(), NotificationSenderError>; -} - -/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. -#[async_trait::async_trait] -pub trait NotificationSender: Send + Sync + 'static { - /// Returns a future that resolves when the `NotificationSender` is ready to send a - /// notification. 
- async fn ready(&self) - -> Result, NotificationSenderError>; -} - -/// Error returned by [`NetworkNotification::notification_sender`]. -#[derive(Debug, thiserror::Error)] -pub enum NotificationSenderError { - /// The notification receiver has been closed, usually because the underlying connection - /// closed. - /// - /// Some of the notifications most recently sent may not have been received. However, - /// the peer may still be connected and a new `NotificationSender` for the same - /// protocol obtained from [`NetworkNotification::notification_sender`]. - #[error("The notification receiver has been closed")] - Closed, - /// Protocol name hasn't been registered. - #[error("Protocol name hasn't been registered")] - BadProtocol, -} - -/// Provides ability to send network notifications. -pub trait NetworkNotification { - /// Appends a notification to the buffer of pending outgoing notifications with the given peer. - /// Has no effect if the notifications channel with this protocol name is not open. - /// - /// If the buffer of pending outgoing notifications with that peer is full, the notification - /// is silently dropped and the connection to the remote will start being shut down. This - /// happens if you call this method at a higher rate than the rate at which the peer processes - /// these notifications, or if the available network bandwidth is too low. - /// - /// For this reason, this method is considered soft-deprecated. You are encouraged to use - /// [`NetworkNotification::notification_sender`] instead. - /// - /// > **Note**: The reason why this is a no-op in the situation where we have no channel is - /// > that we don't guarantee message delivery anyway. Networking issues can cause - /// > connections to drop at any time, and higher-level logic shouldn't differentiate - /// > between the remote voluntarily closing a substream or a network error - /// > preventing the message from being delivered. 
- /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec); - - /// Obtains a [`NotificationSender`] for a connected peer, if it exists. - /// - /// A `NotificationSender` is scoped to a particular connection to the peer that holds - /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two - /// steps: - /// - /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready - /// for another notification, yielding a [`NotificationSenderReady`] token. - /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation - /// can only fail if the underlying notification substream or connection has suddenly closed. - /// - /// An error is returned by [`NotificationSenderReady::send`] if there exists no open - /// notifications substream with that combination of peer and protocol, or if the remote - /// has asked to close the notifications substream. If that happens, it is guaranteed that an - /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by - /// [`NetworkEventStream::event_stream`]. - /// - /// If the remote requests to close the notifications substream, all notifications successfully - /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the - /// substream actually gets closed, but attempting to enqueue more notifications will now - /// return an error. It is however possible for the entire connection to be abruptly closed, - /// in which case enqueued notifications will be lost. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - /// - /// # Usage - /// - /// This method returns a struct that allows waiting until there is space available in the - /// buffer of messages towards the given peer. 
If the peer processes notifications at a slower - /// rate than we send them, this buffer will quickly fill up. - /// - /// As such, you should never do something like this: - /// - /// ```ignore - /// // Do NOT do this - /// for peer in peers { - /// if let Ok(n) = network.notification_sender(peer, ...) { - /// if let Ok(s) = n.ready().await { - /// let _ = s.send(...); - /// } - /// } - /// } - /// ``` - /// - /// Doing so would slow down all peers to the rate of the slowest one. A malicious or - /// malfunctioning peer could intentionally process notifications at a very slow rate. - /// - /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one - /// maintained by `sc-network`, and use `notification_sender` to progressively send out - /// elements from your buffer. If this additional buffer is full (which will happen at some - /// point if the peer is too slow to process notifications), appropriate measures can be taken, - /// such as removing non-critical notifications from the buffer or disconnecting the peer - /// using [`NetworkPeers::disconnect_peer`]. - /// - /// - /// Notifications Per-peer buffer - /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet - /// ^ (not covered by - /// | sc-network) - /// + - /// Notifications should be dropped - /// if buffer is full - /// - /// - /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. 
- fn notification_sender( - &self, - target: PeerId, - protocol: ProtocolName, - ) -> Result, NotificationSenderError>; -} - -impl NetworkNotification for Arc -where - T: ?Sized, - T: NetworkNotification, -{ - fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec) { - T::write_notification(self, target, protocol, message) - } - - fn notification_sender( - &self, - target: PeerId, - protocol: ProtocolName, - ) -> Result, NotificationSenderError> { - T::notification_sender(self, target, protocol) - } -} - -/// Provides ability to send network requests. -#[async_trait::async_trait] -pub trait NetworkRequest { - /// Sends a single targeted request to a specific peer. On success, returns the response of - /// the peer. - /// - /// Request-response protocols are a way to complement notifications protocols, but - /// notifications should remain the default ways of communicating information. For example, a - /// peer can announce something through a notification, after which the recipient can obtain - /// more information by performing a request. - /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way - /// you will get an error immediately for disconnected peers, instead of waiting for a - /// potentially very long connection attempt, which would suggest that something is wrong - /// anyway, as you are supposed to be connected because of the notification protocol. - /// - /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. - /// Such restrictions, if desired, need to be enforced at the call site(s). - /// - /// The protocol must have been registered through - /// `NetworkConfiguration::request_response_protocols`. 
- async fn request( - &self, - target: PeerId, - protocol: ProtocolName, - request: Vec, - connect: IfDisconnected, - ) -> Result, RequestFailure>; - - /// Variation of `request` which starts a request whose response is delivered on a provided - /// channel. - /// - /// Instead of blocking and waiting for a reply, this function returns immediately, sending - /// responses via the passed in sender. This alternative API exists to make it easier to - /// integrate with message passing APIs. - /// - /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a - /// closing connection. This is expected behaviour. With `request` you would get a - /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. - fn start_request( - &self, - target: PeerId, - protocol: ProtocolName, - request: Vec, - tx: oneshot::Sender, RequestFailure>>, - connect: IfDisconnected, - ); -} - -// Manual implementation to avoid extra boxing here -impl NetworkRequest for Arc -where - T: ?Sized, - T: NetworkRequest, -{ - fn request<'life0, 'async_trait>( - &'life0 self, - target: PeerId, - protocol: ProtocolName, - request: Vec, - connect: IfDisconnected, - ) -> Pin, RequestFailure>> + Send + 'async_trait>> - where - 'life0: 'async_trait, - Self: 'async_trait, - { - T::request(self, target, protocol, request, connect) - } - - fn start_request( - &self, - target: PeerId, - protocol: ProtocolName, - request: Vec, - tx: oneshot::Sender, RequestFailure>>, - connect: IfDisconnected, - ) { - T::start_request(self, target, protocol, request, tx, connect) - } -} - -/// Provides ability to announce blocks to the network. -pub trait NetworkBlock { - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. This function forces such an announcement. 
- fn announce_block(&self, hash: BlockHash, data: Option>); - - /// Inform the network service about new best imported block. - fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber); -} - -impl NetworkBlock for Arc -where - T: ?Sized, - T: NetworkBlock, -{ - fn announce_block(&self, hash: BlockHash, data: Option>) { - T::announce_block(self, hash, data) - } - - fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber) { - T::new_best_block_imported(self, hash, number) - } -} diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index dd216b2a5295a..2ee8f8c51814b 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -44,20 +44,11 @@ pub struct PeerInfo { /// Reported sync state. #[derive(Clone, Eq, PartialEq, Debug)] -pub enum SyncState { +pub enum SyncState { /// Initial sync is complete, keep-up sync is active. Idle, /// Actively catching up with the chain. - Downloading { target: BlockNumber }, - /// All blocks are downloaded and are being imported. - Importing { target: BlockNumber }, -} - -impl SyncState { - /// Are we actively catching up with the chain? - pub fn is_major_syncing(&self) -> bool { - !matches!(self, SyncState::Idle) - } + Downloading, } /// Reported state download progress. @@ -73,7 +64,7 @@ pub struct StateDownloadProgress { #[derive(Clone)] pub struct SyncStatus { /// Current global sync state. - pub state: SyncState>, + pub state: SyncState, /// Target sync block number. pub best_seen_block: Option>, /// Number of peers participating in syncing. @@ -105,8 +96,6 @@ pub enum OnBlockData { Import(BlockOrigin, Vec>), /// A new block request needs to be made to the given peer. Request(PeerId, BlockRequest), - /// Continue processing events. - Continue, } /// Result of [`ChainSync::on_block_justification`]. @@ -278,14 +267,12 @@ pub trait ChainSync: Send { ); /// Get an iterator over all scheduled justification requests. 
- fn justification_requests<'a>( - &'a mut self, - ) -> Box)> + 'a>; + fn justification_requests( + &mut self, + ) -> Box)> + '_>; /// Get an iterator over all block requests of all peers. - fn block_requests<'a>( - &'a mut self, - ) -> Box)> + 'a>; + fn block_requests(&mut self) -> Box)> + '_>; /// Get a state request, if any. fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>; @@ -370,9 +357,9 @@ pub trait ChainSync: Send { /// /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to /// import passed header (call `on_block_data`). The network request isn't sent in this case. - fn poll_block_announce_validation<'a>( + fn poll_block_announce_validation( &mut self, - cx: &mut std::task::Context<'a>, + cx: &mut std::task::Context, ) -> Poll>; /// Call when a peer has disconnected. @@ -404,14 +391,4 @@ pub trait ChainSync: Send { /// Decode implementation-specific state response. fn decode_state_response(&self, response: &[u8]) -> Result; - - /// Advance the state of `ChainSync` - /// - /// Internally calls [`ChainSync::poll_block_announce_validation()`] and - /// this function should be polled until it returns [`Poll::Pending`] to - /// consume all pending events. - fn poll( - &mut self, - cx: &mut std::task::Context, - ) -> Poll>; } diff --git a/client/network/common/src/sync/message.rs b/client/network/common/src/sync/message.rs index 346f1dbce9bcc..27ab2704e6471 100644 --- a/client/network/common/src/sync/message.rs +++ b/client/network/common/src/sync/message.rs @@ -19,12 +19,10 @@ //! Network packet message types. These get serialized and put into the lower level protocol //! payload. 
-use crate::protocol::role::Roles; - use bitflags::bitflags; use codec::{Decode, Encode, Error, Input, Output}; pub use generic::{BlockAnnounce, FromBlock}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; /// Type alias for using the block request type using block type parameters. pub type BlockRequest = @@ -160,6 +158,8 @@ pub mod generic { pub fields: BlockAttributes, /// Start from this block. pub from: FromBlock, + /// End at this block. An implementation defined maximum is used when unspecified. + pub to: Option, /// Sequence direction. pub direction: Direction, /// Maximum number of blocks to return. An implementation defined maximum is used when @@ -220,27 +220,3 @@ pub mod generic { } } } - -/// Handshake sent when we open a block announces substream. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct BlockAnnouncesHandshake { - /// Roles of the node. - pub roles: Roles, - /// Best block number. - pub best_number: NumberFor, - /// Best block hash. - pub best_hash: B::Hash, - /// Genesis block hash. - pub genesis_hash: B::Hash, -} - -impl BlockAnnouncesHandshake { - pub fn build( - roles: Roles, - best_number: NumberFor, - best_hash: B::Hash, - genesis_hash: B::Hash, - ) -> Self { - Self { genesis_hash, roles, best_number, best_hash } - } -} diff --git a/client/network/common/src/sync/warp.rs b/client/network/common/src/sync/warp.rs index c9b9037542388..339a4c33a7eeb 100644 --- a/client/network/common/src/sync/warp.rs +++ b/client/network/common/src/sync/warp.rs @@ -64,8 +64,6 @@ pub enum WarpSyncPhase { AwaitingPeers, /// Downloading and verifying grandpa warp proofs. DownloadingWarpProofs, - /// Downloading target block. - DownloadingTargetBlock, /// Downloading state data. DownloadingState, /// Importing state. 
@@ -79,7 +77,6 @@ impl fmt::Display for WarpSyncPhase { match self { Self::AwaitingPeers => write!(f, "Waiting for peers"), Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), - Self::DownloadingTargetBlock => write!(f, "Downloading target block"), Self::DownloadingState => write!(f, "Downloading state"), Self::ImportingState => write!(f, "Importing state"), Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index cd3be390d48c8..0037177fb4046 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -14,17 +14,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.11" +prost-build = "0.10" [dependencies] -array-bytes = "4.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -libp2p = "0.49.0" +libp2p = "0.46.1" log = "0.4.16" -prost = "0.11" +prost = "0.10" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-network-common = { version = "0.10.0-dev", path = "../common" } diff --git a/client/network/light/src/light_client_requests.rs b/client/network/light/src/light_client_requests.rs index 61b549d0f0984..9eccef41e833d 100644 --- a/client/network/light/src/light_client_requests.rs +++ b/client/network/light/src/light_client_requests.rs @@ -25,32 +25,16 @@ use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; use std::time::Duration; -/// Generate the light client protocol name from the genesis hash and fork id. 
-fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!("/{}/{}/light/2", array_bytes::bytes2hex("", genesis_hash), fork_id) - } else { - format!("/{}/light/2", array_bytes::bytes2hex("", genesis_hash)) - } -} - -/// Generate the legacy light client protocol name from chain specific protocol identifier. -fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the light client protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/light/2", protocol_id.as_ref()) } /// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming /// requests. -pub fn generate_protocol_config>( - protocol_id: &ProtocolId, - genesis_hash: Hash, - fork_id: Option<&str>, -) -> ProtocolConfig { +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(genesis_hash, fork_id).into(), - fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) - .collect(), + name: generate_protocol_name(protocol_id).into(), max_request_size: 1 * 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(15), diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 77904c7256295..3c87ccfd6ed9f 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -28,7 +28,7 @@ use futures::{channel::mpsc, prelude::*}; use libp2p::PeerId; use log::{debug, trace}; use prost::Message; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::{ProofProvider, StorageProof}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, 
@@ -38,7 +38,7 @@ use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, }; -use sp_runtime::traits::Block; +use sp_runtime::{generic::BlockId, traits::Block}; use std::{marker::PhantomData, sync::Arc}; const LOG_TARGET: &str = "light-client-request-handler"; @@ -54,27 +54,15 @@ pub struct LightClientRequestHandler { impl LightClientRequestHandler where B: Block, - Client: BlockBackend + ProofProvider + Send + Sync + 'static, + Client: ProofProvider + Send + Sync + 'static, { /// Create a new [`LightClientRequestHandler`]. - pub fn new( - protocol_id: &ProtocolId, - fork_id: Option<&str>, - client: Arc, - ) -> (Self, ProtocolConfig) { + pub fn new(protocol_id: &ProtocolId, client: Arc) -> (Self, ProtocolConfig) { // For now due to lack of data on light client request handling in production systems, this // value is chosen to match the block request limit. let (tx, request_receiver) = mpsc::channel(20); - let mut protocol_config = super::generate_protocol_config( - protocol_id, - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - fork_id, - ); + let mut protocol_config = super::generate_protocol_config(protocol_id); protocol_config.inbound_queue = Some(tx); (Self { client, request_receiver, _block: PhantomData::default() }, protocol_config) @@ -151,8 +139,12 @@ where self.on_remote_call_request(&peer, r)?, Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => self.on_remote_read_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteHeaderRequest(_r)) => + return Err(HandleRequestError::BadRequest("Not supported.")), Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteChangesRequest(_r)) => + return Err(HandleRequestError::BadRequest("Not supported.")), None => return Err(HandleRequestError::BadRequest("Remote request without request data.")), 
}; @@ -172,24 +164,30 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let response = match self.client.execution_proof(block, &request.method, &request.data) { - Ok((_, proof)) => { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; - Some(schema::v1::light::response::Response::RemoteCallResponse(r)) - }, - Err(e) => { - trace!( - "remote call request from {} ({} at {:?}) failed with: {}", - peer, - request.method, - request.block, - e, - ); - None - }, + let proof = + match self + .client + .execution_proof(&BlockId::Hash(block), &request.method, &request.data) + { + Ok((_, proof)) => proof, + Err(e) => { + trace!( + "remote call request from {} ({} at {:?}) failed with: {}", + peer, + request.method, + request.block, + e, + ); + StorageProof::empty() + }, + }; + + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteCallResponse(r) }; - Ok(schema::v1::light::Response { response }) + Ok(schema::v1::light::Response { response: Some(response) }) } fn on_remote_read_request( @@ -211,25 +209,29 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let response = - match self.client.read_proof(block, &mut request.keys.iter().map(AsRef::as_ref)) { - Ok(proof) => { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - Some(schema::v1::light::response::Response::RemoteReadResponse(r)) - }, - Err(error) => { - trace!( - "remote read request from {} ({} at {:?}) failed with: {}", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error, - ); - None - }, - }; + let proof = match self + .client + .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) + { + Ok(proof) => proof, + Err(error) => { + trace!( + "remote read request from {} ({} at {:?}) failed with: {}", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error, + 
); + StorageProof::empty() + }, + }; - Ok(schema::v1::light::Response { response }) + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) } fn on_remote_read_child_request( @@ -257,17 +259,14 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err(sp_blockchain::Error::InvalidChildStorageKey), }; - let response = match child_info.and_then(|child_info| { + let proof = match child_info.and_then(|child_info| { self.client.read_child_proof( - block, + &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), ) }) { - Ok(proof) => { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - Some(schema::v1::light::response::Response::RemoteReadResponse(r)) - }, + Ok(proof) => proof, Err(error) => { trace!( "remote read child request from {} ({} {} at {:?}) failed with: {}", @@ -277,11 +276,16 @@ where request.block, error, ); - None + StorageProof::empty() }, }; - Ok(schema::v1::light::Response { response }) + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) } } diff --git a/client/network/light/src/schema/light.v1.proto b/client/network/light/src/schema/light.v1.proto index 1df5466e21988..9b5d47719dc28 100644 --- a/client/network/light/src/schema/light.v1.proto +++ b/client/network/light/src/schema/light.v1.proto @@ -17,8 +17,9 @@ message Request { oneof request { RemoteCallRequest remote_call_request = 1; RemoteReadRequest remote_read_request = 2; + RemoteHeaderRequest remote_header_request = 3; RemoteReadChildRequest remote_read_child_request = 4; - // Note: ids 3 and 5 were used in the past. 
It would be preferable to not re-use them. + RemoteChangesRequest remote_changes_request = 5; } } @@ -27,7 +28,8 @@ message Response { oneof response { RemoteCallResponse remote_call_response = 1; RemoteReadResponse remote_read_response = 2; - // Note: ids 3 and 4 were used in the past. It would be preferable to not re-use them. + RemoteHeaderResponse remote_header_response = 3; + RemoteChangesResponse remote_changes_response = 4; } } @@ -71,3 +73,48 @@ message RemoteReadChildRequest { // Storage keys. repeated bytes keys = 6; } + +// Remote header request. +message RemoteHeaderRequest { + // Block number to request header for. + bytes block = 2; +} + +// Remote header response. +message RemoteHeaderResponse { + // Header. None if proof generation has failed (e.g. header is unknown). + bytes header = 2; // optional + // Header proof. + bytes proof = 3; +} + +/// Remote changes request. +message RemoteChangesRequest { + // Hash of the first block of the range (including first) where changes are requested. + bytes first = 2; + // Hash of the last block of the range (including last) where changes are requested. + bytes last = 3; + // Hash of the first block for which the requester has the changes trie root. All other + // affected roots must be proved. + bytes min = 4; + // Hash of the last block that we can use when querying changes. + bytes max = 5; + // Storage child node key which changes are requested. + bytes storage_key = 6; // optional + // Storage key which changes are requested. + bytes key = 7; +} + +// Remote changes response. +message RemoteChangesResponse { + // Proof has been generated using block with this number as a max block. Should be + // less than or equal to the RemoteChangesRequest::max block number. + bytes max = 2; + // Changes proof. + repeated bytes proof = 3; + // Changes tries roots missing on the requester' node. + repeated Pair roots = 4; + // Missing changes tries roots proof. 
+ bytes roots_proof = 5; +} + diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 2e646956e9d8c..515608df13d0f 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -17,49 +17,63 @@ // along with this program. If not, see . use crate::{ + bitswap::Bitswap, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, peer_info, - protocol::{CustomMessageOutcome, NotificationsSink, Protocol}, - request_responses, + protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, + request_responses, DhtEvent, ObservedRole, }; use bytes::Bytes; +use codec::Encode; use futures::channel::oneshot; use libp2p::{ core::{Multiaddr, PeerId, PublicKey}, - identify::Info as IdentifyInfo, + identify::IdentifyInfo, kad::record, + swarm::{ + behaviour::toggle::Toggle, NetworkBehaviour, NetworkBehaviourAction, + NetworkBehaviourEventProcess, PollParameters, + }, NetworkBehaviour, }; +use log::debug; -use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; -use sc_network_common::{ - protocol::{ - event::DhtEvent, - role::{ObservedRole, Roles}, - ProtocolName, - }, - request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, - sync::{warp::WarpProofRequest, OpaqueBlockRequest, OpaqueStateRequest}, -}; -use sc_peerset::{PeersetHandle, ReputationChange}; -use sp_blockchain::HeaderBackend; +use sc_client_api::{BlockBackend, ProofProvider}; +use sc_consensus::import_queue::{IncomingBlock, Origin}; +use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; +use sc_peerset::PeersetHandle; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::BlockOrigin; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, Justifications, }; -use std::{collections::HashSet, time::Duration}; +use std::{ + borrow::Cow, + collections::{HashSet, VecDeque}, + iter, + task::{Context, Poll}, + time::Duration, +}; -pub use crate::request_responses::{InboundFailure, 
OutboundFailure, RequestId, ResponseFailure}; +pub use crate::request_responses::{ + IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure, +}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut")] +#[behaviour(out_event = "BehaviourOut", poll_method = "poll", event_process = true)] pub struct Behaviour where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// All the substrate-specific protocols. substrate: Protocol, @@ -68,17 +82,38 @@ where peer_info: peer_info::PeerInfoBehaviour, /// Discovers nodes of the network. discovery: DiscoveryBehaviour, + /// Bitswap server for blockchain data. + bitswap: Toggle>, /// Generic request-response protocols. request_responses: request_responses::RequestResponsesBehaviour, + + /// Queue of events to produce for the outside. + #[behaviour(ignore)] + events: VecDeque>, + + /// Protocol name used to send out block requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + block_request_protocol_name: String, + + /// Protocol name used to send out state requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + state_request_protocol_name: String, + + /// Protocol name used to send out warp sync requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + warp_sync_protocol_name: Option, } /// Event generated by `Behaviour`. pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), + JustificationImport(Origin, B::Hash, NumberFor, Justifications), /// Started a random iterative Kademlia discovery query. - RandomKademliaStarted, + RandomKademliaStarted(ProtocolId), /// We have received a request from a peer and answered it. 
/// @@ -87,7 +122,7 @@ pub enum BehaviourOut { /// Peer which sent us a request. peer: PeerId, /// Protocol name of the request. - protocol: ProtocolName, + protocol: Cow<'static, str>, /// If `Ok`, contains the time elapsed between when we received the request and when we /// sent back the response. If `Err`, the error that happened. result: Result, @@ -100,19 +135,13 @@ pub enum BehaviourOut { /// Peer that we send a request to. peer: PeerId, /// Name of the protocol in question. - protocol: ProtocolName, + protocol: Cow<'static, str>, /// Duration the request took. duration: Duration, /// Result of the request. result: Result<(), RequestFailure>, }, - /// A request protocol handler issued reputation changes for the given peer. - ReputationChanges { - peer: PeerId, - changes: Vec, - }, - /// Opened a substream with the given node with the given notifications protocol. /// /// The protocol is always one of the notification protocols that have been registered. @@ -120,12 +149,12 @@ pub enum BehaviourOut { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + protocol: Cow<'static, str>, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. /// See also [`crate::Event::NotificationStreamOpened`]. - negotiated_fallback: Option, + negotiated_fallback: Option>, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, /// Role of the remote. @@ -141,7 +170,7 @@ pub enum BehaviourOut { /// Id of the peer we are connected to. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + protocol: Cow<'static, str>, /// Replacement for the previous [`NotificationsSink`]. 
notifications_sink: NotificationsSink, }, @@ -152,7 +181,7 @@ pub enum BehaviourOut { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -160,37 +189,7 @@ pub enum BehaviourOut { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ProtocolName, Bytes)>, - }, - - /// A new block request must be emitted. - BlockRequest { - /// Node we send the request to. - target: PeerId, - /// Opaque implementation-specific block request. - request: OpaqueBlockRequest, - /// One-shot channel to receive the response. - pending_response: oneshot::Sender, RequestFailure>>, - }, - - /// A new state request must be emitted. - StateRequest { - /// Node we send the request to. - target: PeerId, - /// Opaque implementation-specific state request. - request: OpaqueStateRequest, - /// One-shot channel to receive the response. - pending_response: oneshot::Sender, RequestFailure>>, - }, - - /// A new warp sync request must be emitted. - WarpSyncRequest { - /// Node we send the request to. - target: PeerId, - /// Warp sync request. - request: WarpProofRequest, - /// One-shot channel to receive the response. - pending_response: oneshot::Sender, RequestFailure>>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, /// Now connected to a new peer for syncing purposes. @@ -199,30 +198,21 @@ pub enum BehaviourOut { /// No longer connected to a peer for syncing purposes. SyncDisconnected(PeerId), - /// We have obtained identity information from a peer, including the addresses it is listening - /// on. - PeerIdentify { - /// Id of the peer that has been identified. - peer_id: PeerId, - /// Information about the peer. - info: IdentifyInfo, - }, - - /// We have learned about the existence of a node on the default set. 
- Discovered(PeerId), - /// Events generated by a DHT as a response to get_value or put_value requests as well as the /// request duration. Dht(DhtEvent, Duration), - - /// Ignored event generated by lower layers. - None, } impl Behaviour where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Builds a new `Behaviour`. pub fn new( @@ -233,14 +223,23 @@ where block_request_protocol_config: ProtocolConfig, state_request_protocol_config: ProtocolConfig, warp_sync_protocol_config: Option, + bitswap: Option>, light_client_request_protocol_config: ProtocolConfig, // All remaining request protocol configs. mut request_response_protocols: Vec, peerset: PeersetHandle, ) -> Result { - if let Some(config) = warp_sync_protocol_config { - request_response_protocols.push(config); - } + // Extract protocol name and add to `request_response_protocols`. + let block_request_protocol_name = block_request_protocol_config.name.to_string(); + let state_request_protocol_name = state_request_protocol_config.name.to_string(); + let warp_sync_protocol_name = match warp_sync_protocol_config { + Some(config) => { + let name = config.name.to_string(); + request_response_protocols.push(config); + Some(name) + }, + None => None, + }; request_response_protocols.push(block_request_protocol_config); request_response_protocols.push(state_request_protocol_config); request_response_protocols.push(light_client_request_protocol_config); @@ -249,10 +248,15 @@ where substrate, peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), + bitswap: bitswap.into(), request_responses: request_responses::RequestResponsesBehaviour::new( request_response_protocols.into_iter(), peerset, )?, + events: VecDeque::new(), + block_request_protocol_name, + state_request_protocol_name, + warp_sync_protocol_name, }) } @@ -266,20 +270,25 @@ where 
self.discovery.add_known_address(peer_id, addr) } - /// Returns the number of nodes in each Kademlia kbucket. + /// Returns the number of nodes in each Kademlia kbucket for each Kademlia instance. /// - /// Identifies kbuckets by the base 2 logarithm of their lower bound. - pub fn num_entries_per_kbucket(&mut self) -> Option> { + /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm + /// of their lower bound. + pub fn num_entries_per_kbucket( + &mut self, + ) -> impl ExactSizeIterator)> { self.discovery.num_entries_per_kbucket() } /// Returns the number of records in the Kademlia record stores. - pub fn num_kademlia_records(&mut self) -> Option { + pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { self.discovery.num_kademlia_records() } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> Option { + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { self.discovery.kademlia_records_total_size() } @@ -315,17 +324,6 @@ where &mut self.substrate } - /// Add a self-reported address of a remote peer to the k-buckets of the supported - /// DHTs (`supported_protocols`). - pub fn add_self_reported_address_to_dht( - &mut self, - peer_id: &PeerId, - supported_protocols: &[impl AsRef<[u8]>], - addr: Multiaddr, - ) { - self.discovery.add_self_reported_address(peer_id, supported_protocols, addr); - } - /// Start querying a record from the DHT. Will later produce either a `ValueFound` or a /// `ValueNotFound` event. 
pub fn get_value(&mut self, key: record::Key) { @@ -349,90 +347,267 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl From> for BehaviourOut { - fn from(event: CustomMessageOutcome) -> Self { +impl NetworkBehaviourEventProcess for Behaviour +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn inject_event(&mut self, event: void::Void) { + void::unreachable(event) + } +} + +impl NetworkBehaviourEventProcess> for Behaviour +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn inject_event(&mut self, event: CustomMessageOutcome) { match event { CustomMessageOutcome::BlockImport(origin, blocks) => - BehaviourOut::BlockImport(origin, blocks), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - BehaviourOut::JustificationImport(origin, hash, nb, justification), - CustomMessageOutcome::BlockRequest { target, request, pending_response } => - BehaviourOut::BlockRequest { target, request, pending_response }, - CustomMessageOutcome::StateRequest { target, request, pending_response } => - BehaviourOut::StateRequest { target, request, pending_response }, + self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => self + .events + .push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), + CustomMessageOutcome::BlockRequest { target, request, pending_response } => { + match self.substrate.encode_block_request(&request) { + Ok(data) => { + self.request_responses.send_request( + &target, + &self.block_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + request, err + ); + }, + } + }, + 
CustomMessageOutcome::StateRequest { target, request, pending_response } => { + match self.substrate.encode_state_request(&request) { + Ok(data) => { + self.request_responses.send_request( + &target, + &self.state_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode state request {:?}: {:?}", + request, err + ); + }, + } + }, CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => - BehaviourOut::WarpSyncRequest { target, request, pending_response }, + match &self.warp_sync_protocol_name { + Some(name) => self.request_responses.send_request( + &target, + name, + request.encode(), + pending_response, + IfDisconnected::ImmediateError, + ), + None => { + log::warn!( + target: "sync", + "Trying to send warp sync request when no protocol is configured {:?}", + request, + ); + }, + }, CustomMessageOutcome::NotificationStreamOpened { remote, protocol, negotiated_fallback, roles, notifications_sink, - } => BehaviourOut::NotificationStreamOpened { + } => { + self.events.push_back(BehaviourOut::NotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + role: reported_roles_to_observed_role(roles), + notifications_sink, + }); + }, + CustomMessageOutcome::NotificationStreamReplaced { remote, protocol, - negotiated_fallback, - role: reported_roles_to_observed_role(roles), notifications_sink, - }, - CustomMessageOutcome::NotificationStreamReplaced { + } => self.events.push_back(BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink, - } => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => - BehaviourOut::NotificationStreamClosed { remote, protocol }, - CustomMessageOutcome::NotificationsReceived { remote, messages } => - BehaviourOut::NotificationsReceived { remote, messages }, - 
CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None, - CustomMessageOutcome::SyncConnected(peer_id) => BehaviourOut::SyncConnected(peer_id), + }), + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => self + .events + .push_back(BehaviourOut::NotificationStreamClosed { remote, protocol }), + CustomMessageOutcome::NotificationsReceived { remote, messages } => { + self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); + }, + CustomMessageOutcome::PeerNewBest(_peer_id, _number) => {}, + CustomMessageOutcome::SyncConnected(peer_id) => + self.events.push_back(BehaviourOut::SyncConnected(peer_id)), CustomMessageOutcome::SyncDisconnected(peer_id) => - BehaviourOut::SyncDisconnected(peer_id), - CustomMessageOutcome::None => BehaviourOut::None, + self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)), + CustomMessageOutcome::None => {}, } } } -impl From for BehaviourOut { - fn from(event: request_responses::Event) -> Self { +impl NetworkBehaviourEventProcess for Behaviour +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn inject_event(&mut self, event: request_responses::Event) { match event { - request_responses::Event::InboundRequest { peer, protocol, result } => - BehaviourOut::InboundRequest { peer, protocol, result }, - request_responses::Event::RequestFinished { peer, protocol, duration, result } => - BehaviourOut::RequestFinished { peer, protocol, duration, result }, + request_responses::Event::InboundRequest { peer, protocol, result } => { + self.events.push_back(BehaviourOut::InboundRequest { peer, protocol, result }); + }, + request_responses::Event::RequestFinished { peer, protocol, duration, result } => { + self.events.push_back(BehaviourOut::RequestFinished { + peer, + protocol, + duration, + result, + }); + }, request_responses::Event::ReputationChanges { peer, changes } => - 
BehaviourOut::ReputationChanges { peer, changes }, + for change in changes { + self.substrate.report_peer(peer, change); + }, } } } -impl From for BehaviourOut { - fn from(event: peer_info::PeerInfoEvent) -> Self { - let peer_info::PeerInfoEvent::Identified { peer_id, info } = event; - BehaviourOut::PeerIdentify { peer_id, info } +impl NetworkBehaviourEventProcess for Behaviour +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { + let peer_info::PeerInfoEvent::Identified { + peer_id, + info: IdentifyInfo { protocol_version, agent_version, mut listen_addrs, protocols, .. }, + } = event; + + if listen_addrs.len() > 30 { + debug!( + target: "sub-libp2p", + "Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}", + peer_id, protocol_version, agent_version + ); + listen_addrs.truncate(30); + } + + for addr in listen_addrs { + self.discovery.add_self_reported_address(&peer_id, protocols.iter(), addr); + } + self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); } } -impl From for BehaviourOut { - fn from(event: DiscoveryOut) -> Self { - match event { +impl NetworkBehaviourEventProcess for Behaviour +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn inject_event(&mut self, out: DiscoveryOut) { + match out { DiscoveryOut::UnroutablePeer(_peer_id) => { // Obtaining and reporting listen addresses for unroutable peers back // to Kademlia is handled by the `Identify` protocol, part of the - // `PeerInfoBehaviour`. See the `From` - // implementation. - BehaviourOut::None + // `PeerInfoBehaviour`. See the `NetworkBehaviourEventProcess` + // implementation for `PeerInfoEvent`. 
+ }, + DiscoveryOut::Discovered(peer_id) => { + self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); + }, + DiscoveryOut::ValueFound(results, duration) => { + self.events + .push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); + }, + DiscoveryOut::ValueNotFound(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration)); + }, + DiscoveryOut::ValuePut(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key), duration)); + }, + DiscoveryOut::ValuePutFailed(key, duration) => { + self.events + .push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); }, - DiscoveryOut::Discovered(peer_id) => BehaviourOut::Discovered(peer_id), - DiscoveryOut::ValueFound(results, duration) => - BehaviourOut::Dht(DhtEvent::ValueFound(results), duration), - DiscoveryOut::ValueNotFound(key, duration) => - BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration), - DiscoveryOut::ValuePut(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePut(key), duration), - DiscoveryOut::ValuePutFailed(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration), - DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, + DiscoveryOut::RandomKademliaStarted(protocols) => + for protocol in protocols { + self.events.push_back(BehaviourOut::RandomKademliaStarted(protocol)); + }, } } } + +impl Behaviour +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn poll( + &mut self, + _cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll, ::ConnectionHandler>> + { + if let Some(event) = self.events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) + } + + Poll::Pending + } +} diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs new file mode 100644 index 0000000000000..2dab45adc5618 --- /dev/null +++ 
b/client/network/src/bitswap.rs @@ -0,0 +1,340 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Bitswap server for substrate. +//! +//! Allows querying transactions by hash over standard bitswap protocol +//! Only supports bitswap 1.2.0. +//! CID is expected to reference 256-bit Blake2b transaction hash. + +use crate::schema::bitswap::{ + message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType}, + Message as BitswapMessage, +}; +use cid::Version; +use core::pin::Pin; +use futures::{ + io::{AsyncRead, AsyncWrite}, + Future, +}; +use libp2p::{ + core::{ + connection::ConnectionId, upgrade, InboundUpgrade, Multiaddr, OutboundUpgrade, PeerId, + UpgradeInfo, + }, + swarm::{ + NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, OneShotHandler, PollParameters, + }, +}; +use log::{debug, error, trace}; +use prost::Message; +use sc_client_api::BlockBackend; +use sp_runtime::traits::Block as BlockT; +use std::{ + collections::VecDeque, + io, + marker::PhantomData, + sync::Arc, + task::{Context, Poll}, +}; +use unsigned_varint::encode as varint_encode; + +const LOG_TARGET: &str = "bitswap"; + +// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes +// https://github.com/ipfs/js-ipfs-bitswap/blob/ +// 
d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16 +// We set it to the same value as max substrate protocol message +const MAX_PACKET_SIZE: usize = 16 * 1024 * 1024; + +// Max number of queued responses before denying requests. +const MAX_RESPONSE_QUEUE: usize = 20; +// Max number of blocks per wantlist +const MAX_WANTED_BLOCKS: usize = 16; + +const PROTOCOL_NAME: &[u8] = b"/ipfs/bitswap/1.2.0"; + +type FutureResult = Pin> + Send>>; + +/// Bitswap protocol config +#[derive(Clone, Copy, Debug, Default)] +pub struct BitswapConfig; + +impl UpgradeInfo for BitswapConfig { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(PROTOCOL_NAME) + } +} + +impl InboundUpgrade for BitswapConfig +where + TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + type Output = BitswapMessage; + type Error = BitswapError; + type Future = FutureResult; + + fn upgrade_inbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { + Box::pin(async move { + let packet = upgrade::read_length_prefixed(&mut socket, MAX_PACKET_SIZE).await?; + let message: BitswapMessage = Message::decode(packet.as_slice())?; + Ok(message) + }) + } +} + +impl UpgradeInfo for BitswapMessage { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(PROTOCOL_NAME) + } +} + +impl OutboundUpgrade for BitswapMessage +where + TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + type Output = (); + type Error = io::Error; + type Future = FutureResult; + + fn upgrade_outbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { + Box::pin(async move { + let data = self.encode_to_vec(); + upgrade::write_length_prefixed(&mut socket, data).await + }) + } +} + +/// Internal protocol handler event. +#[derive(Debug)] +pub enum HandlerEvent { + /// We received a `BitswapMessage` from a remote. 
+ Request(BitswapMessage), + /// We successfully sent a `BitswapMessage`. + ResponseSent, +} + +impl From for HandlerEvent { + fn from(message: BitswapMessage) -> Self { + Self::Request(message) + } +} + +impl From<()> for HandlerEvent { + fn from(_: ()) -> Self { + Self::ResponseSent + } +} + +/// Prefix represents all metadata of a CID, without the actual content. +#[derive(PartialEq, Eq, Clone, Debug)] +struct Prefix { + /// The version of CID. + pub version: Version, + /// The codec of CID. + pub codec: u64, + /// The multihash type of CID. + pub mh_type: u64, + /// The multihash length of CID. + pub mh_len: u8, +} + +impl Prefix { + /// Convert the prefix to encoded bytes. + pub fn to_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(4); + let mut buf = varint_encode::u64_buffer(); + let version = varint_encode::u64(self.version.into(), &mut buf); + res.extend_from_slice(version); + let mut buf = varint_encode::u64_buffer(); + let codec = varint_encode::u64(self.codec, &mut buf); + res.extend_from_slice(codec); + let mut buf = varint_encode::u64_buffer(); + let mh_type = varint_encode::u64(self.mh_type, &mut buf); + res.extend_from_slice(mh_type); + let mut buf = varint_encode::u64_buffer(); + let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf); + res.extend_from_slice(mh_len); + res + } +} + +/// Network behaviour that handles sending and receiving IPFS blocks. +pub struct Bitswap { + client: Arc, + ready_blocks: VecDeque<(PeerId, BitswapMessage)>, + _block: PhantomData, +} + +impl Bitswap { + /// Create a new instance of the bitswap protocol handler. 
+ pub fn new(client: Arc) -> Self { + Self { client, ready_blocks: Default::default(), _block: PhantomData::default() } + } +} + +impl NetworkBehaviour for Bitswap +where + B: BlockT, + Client: BlockBackend + Send + Sync + 'static, +{ + type ConnectionHandler = OneShotHandler; + type OutEvent = void::Void; + + fn new_handler(&mut self) -> Self::ConnectionHandler { + Default::default() + } + + fn addresses_of_peer(&mut self, _peer: &PeerId) -> Vec { + Vec::new() + } + + fn inject_event(&mut self, peer: PeerId, _connection: ConnectionId, message: HandlerEvent) { + let request = match message { + HandlerEvent::ResponseSent => return, + HandlerEvent::Request(msg) => msg, + }; + trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); + if self.ready_blocks.len() > MAX_RESPONSE_QUEUE { + debug!(target: LOG_TARGET, "Ignored request: queue is full"); + return + } + let mut response = BitswapMessage { + wantlist: None, + blocks: Default::default(), + payload: Default::default(), + block_presences: Default::default(), + pending_bytes: 0, + }; + let wantlist = match request.wantlist { + Some(wantlist) => wantlist, + None => { + debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); + return + }, + }; + if wantlist.entries.len() > MAX_WANTED_BLOCKS { + trace!(target: LOG_TARGET, "Ignored request: too many entries"); + return + } + for entry in wantlist.entries { + let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { + Ok(cid) => cid, + Err(e) => { + trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); + continue + }, + }; + if cid.version() != cid::Version::V1 || + cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || + cid.hash().size() != 32 + { + debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); + continue + } + let mut hash = B::Hash::default(); + hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); + let transaction = match self.client.indexed_transaction(&hash) { + 
Ok(ex) => ex, + Err(e) => { + error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); + None + }, + }; + match transaction { + Some(transaction) => { + trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); + if entry.want_type == WantType::Block as i32 { + let prefix = Prefix { + version: cid.version(), + codec: cid.codec(), + mh_type: cid.hash().code(), + mh_len: cid.hash().size(), + }; + response + .payload + .push(MessageBlock { prefix: prefix.to_bytes(), data: transaction }); + } else { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::Have as i32, + cid: cid.to_bytes(), + }); + } + }, + None => { + trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash); + if entry.send_dont_have { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::DontHave as i32, + cid: cid.to_bytes(), + }); + } + }, + } + } + trace!(target: LOG_TARGET, "Response: {:?}", response); + self.ready_blocks.push_back((peer, response)); + } + + fn poll( + &mut self, + _ctx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll> { + if let Some((peer_id, message)) = self.ready_blocks.pop_front() { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: message, + }) + } + Poll::Pending + } +} + +/// Bitswap protocol error. +#[derive(Debug, thiserror::Error)] +pub enum BitswapError { + /// Protobuf decoding error. + #[error("Failed to decode request: {0}.")] + DecodeProto(#[from] prost::DecodeError), + + /// Protobuf encoding error. + #[error("Failed to encode response: {0}.")] + EncodeProto(#[from] prost::EncodeError), + + /// Client backend error. + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + /// Error parsing CID + #[error(transparent)] + BadCid(#[from] cid::Error), + + /// Packet read error. + #[error(transparent)] + Read(#[from] io::Error), + + /// Error sending response. 
+ #[error("Failed to send response.")] + SendResponse, +} diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 14f7e8ffbf76a..2622762da5fc9 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -23,30 +23,29 @@ pub use sc_network_common::{ config::ProtocolId, - protocol::role::Role, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, sync::warp::WarpSyncProvider, - ExHashT, }; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::ChainSyncInterface; +use crate::ExHashT; + use core::{fmt, iter}; +use futures::future; use libp2p::{ identity::{ed25519, Keypair}, - multiaddr, Multiaddr, + multiaddr, Multiaddr, PeerId, }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, - sync::ChainSync, -}; +use sc_network_common::sync::ChainSync; use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, + collections::HashMap, error::Error, fs, future::Future, @@ -54,14 +53,17 @@ use std::{ net::Ipv4Addr, path::{Path, PathBuf}, pin::Pin, + str, + str::FromStr, sync::Arc, }; use zeroize::Zeroize; /// Network initialization parameters. -pub struct Params +pub struct Params where B: BlockT + 'static, + H: ExHashT, { /// Assigned role for our node (full, light, ...). pub role: Role, @@ -70,18 +72,23 @@ where /// default. pub executor: Option + Send>>) + Send>>, + /// How to spawn the background task dedicated to the transactions handler. + pub transactions_handler_executor: Box + Send>>) + Send>, + /// Network layer configuration. pub network_config: NetworkConfiguration, /// Client that contains the blockchain. pub chain: Arc, - /// Legacy name of the protocol to use on the wire. Should be different for each chain. - pub protocol_id: ProtocolId, + /// Pool of transactions. 
+ /// + /// The network worker will fetch transactions from this object in order to propagate them on + /// the network. + pub transaction_pool: Arc>, - /// Fork ID to distinguish protocols of different hard forks. Part of the standard protocol - /// name on the wire. - pub fork_id: Option, + /// Name of the protocol to use on the wire. Should be different for each chain. + pub protocol_id: ProtocolId, /// Import queue to use. /// @@ -92,15 +99,9 @@ where /// Instance of chain sync implementation. pub chain_sync: Box>, - /// Interface that can be used to delegate syncing-related function calls to `ChainSync` - pub chain_sync_service: Box>, - /// Registry for recording prometheus metrics to. pub metrics_registry: Option, - /// Block announce protocol configuration - pub block_announce_config: NonDefaultSetConfig, - /// Request response configuration for the block request protocol. /// /// [`RequestResponseConfig::name`] is used to tag outgoing block requests with the correct @@ -133,9 +134,215 @@ where /// Optional warp sync protocol config. pub warp_sync_protocol_config: Option, +} + +/// Role of the local node. +#[derive(Debug, Clone)] +pub enum Role { + /// Regular full node. + Full, + /// Actual authority. + Authority, +} + +impl Role { + /// True for [`Role::Authority`]. + pub fn is_authority(&self) -> bool { + matches!(self, Self::Authority { .. }) + } +} + +impl fmt::Display for Role { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Full => write!(f, "FULL"), + Self::Authority { .. } => write!(f, "AUTHORITY"), + } + } +} + +/// Result of the transaction import. +#[derive(Clone, Copy, Debug)] +pub enum TransactionImport { + /// Transaction is good but already known by the transaction pool. + KnownGood, + /// Transaction is good and not yet known. + NewGood, + /// Transaction is invalid. + Bad, + /// Transaction import was not performed. + None, +} + +/// Future resolving to transaction import result. 
+pub type TransactionImportFuture = Pin + Send>>; + +/// Transaction pool interface +pub trait TransactionPool: Send + Sync { + /// Get transactions from the pool that are ready to be propagated. + fn transactions(&self) -> Vec<(H, B::Extrinsic)>; + /// Get hash of transaction. + fn hash_of(&self, transaction: &B::Extrinsic) -> H; + /// Import a transaction into the pool. + /// + /// This will return future. + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap>); + /// Get transaction by hash. + fn transaction(&self, hash: &H) -> Option; +} + +/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always +/// empty and discards all incoming transactions. +/// +/// Requires the "hash" type to implement the `Default` trait. +/// +/// Useful for testing purposes. +pub struct EmptyTransactionPool; + +impl TransactionPool for EmptyTransactionPool { + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + Vec::new() + } - /// Request response protocol configurations - pub request_response_protocol_configs: Vec, + fn hash_of(&self, _transaction: &B::Extrinsic) -> H { + Default::default() + } + + fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { + Box::pin(future::ready(TransactionImport::KnownGood)) + } + + fn on_broadcasted(&self, _: HashMap>) {} + + fn transaction(&self, _h: &H) -> Option { + None + } +} + +/// Parses a string address and splits it into Multiaddress and PeerId, if +/// valid. 
+/// +/// # Example +/// +/// ``` +/// # use sc_network::{Multiaddr, PeerId, config::parse_str_addr}; +/// let (peer_id, addr) = parse_str_addr( +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" +/// ).unwrap(); +/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); +/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); +/// ``` +pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { + let addr: Multiaddr = addr_str.parse()?; + parse_addr(addr) +} + +/// Splits a Multiaddress into a Multiaddress and PeerId. +pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { + let who = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, + _ => return Err(ParseErr::PeerIdMissing), + }; + + Ok((who, addr)) +} + +/// Address of a node, including its identity. +/// +/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. +/// +/// # Example +/// +/// ``` +/// # use sc_network::{Multiaddr, PeerId, config::MultiaddrWithPeerId}; +/// let addr: MultiaddrWithPeerId = +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); +/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); +/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] +#[serde(try_from = "String", into = "String")] +pub struct MultiaddrWithPeerId { + /// Address of the node. + pub multiaddr: Multiaddr, + /// Its identity. + pub peer_id: PeerId, +} + +impl MultiaddrWithPeerId { + /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
+ pub fn concat(&self) -> Multiaddr { + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); + self.multiaddr.clone().with(proto) + } +} + +impl fmt::Display for MultiaddrWithPeerId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.concat(), f) + } +} + +impl FromStr for MultiaddrWithPeerId { + type Err = ParseErr; + + fn from_str(s: &str) -> Result { + let (peer_id, multiaddr) = parse_str_addr(s)?; + Ok(Self { peer_id, multiaddr }) + } +} + +impl From for String { + fn from(ma: MultiaddrWithPeerId) -> String { + format!("{}", ma) + } +} + +impl TryFrom for MultiaddrWithPeerId { + type Error = ParseErr; + fn try_from(string: String) -> Result { + string.parse() + } +} + +/// Error that can be generated by `parse_str_addr`. +#[derive(Debug)] +pub enum ParseErr { + /// Error while parsing the multiaddress. + MultiaddrParse(multiaddr::Error), + /// Multihash of the peer ID is invalid. + InvalidPeerId, + /// The peer ID is missing from the address. + PeerIdMissing, +} + +impl fmt::Display for ParseErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::MultiaddrParse(err) => write!(f, "{}", err), + Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), + Self::PeerIdMissing => write!(f, "Peer id is missing from the address"), + } + } +} + +impl std::error::Error for ParseErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::MultiaddrParse(err) => Some(err), + Self::InvalidPeerId => None, + Self::PeerIdMissing => None, + } + } +} + +impl From for ParseErr { + fn from(err: multiaddr::Error) -> ParseErr { + Self::MultiaddrParse(err) + } } /// Sync operation mode. @@ -306,6 +513,132 @@ impl NetworkConfiguration { } } +/// Configuration for a set of nodes. +#[derive(Clone, Debug)] +pub struct SetConfig { + /// Maximum allowed number of incoming substreams related to this set. 
+ pub in_peers: u32, + /// Number of outgoing substreams related to this set that we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically + /// refused. + pub non_reserved_mode: NonReservedPeerMode, +} + +impl Default for SetConfig { + fn default() -> Self { + Self { + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } + } +} + +/// Extension to [`SetConfig`] for sets that aren't the default set. +/// +/// > **Note**: As new fields might be added in the future, please consider using the `new` method +/// > and modifiers instead of creating this struct manually. +#[derive(Clone, Debug)] +pub struct NonDefaultSetConfig { + /// Name of the notifications protocols of this set. A substream on this set will be + /// considered established once this protocol is open. + /// + /// > **Note**: This field isn't present for the default set, as this is handled internally + /// > by the networking code. + pub notifications_protocol: Cow<'static, str>, + /// If the remote reports that it doesn't support the protocol indicated in the + /// `notifications_protocol` field, then each of these fallback names will be tried one by + /// one. + /// + /// If a fallback is used, it will be reported in + /// [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. + pub fallback_names: Vec>, + /// Maximum allowed size of single notifications. + pub max_notification_size: u64, + /// Base configuration. + pub set_config: SetConfig, +} + +impl NonDefaultSetConfig { + /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. 
+ pub fn new(notifications_protocol: Cow<'static, str>, max_notification_size: u64) -> Self { + Self { + notifications_protocol, + max_notification_size, + fallback_names: Vec::new(), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } + + /// Modifies the configuration to allow non-reserved nodes. + pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { + self.set_config.in_peers = in_peers; + self.set_config.out_peers = out_peers; + self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; + } + + /// Add a node to the list of reserved nodes. + pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { + self.set_config.reserved_nodes.push(peer); + } + + /// Add a list of protocol names used for backward compatibility. + /// + /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. + pub fn add_fallback_names(&mut self, fallback_names: Vec>) { + self.fallback_names.extend(fallback_names); + } +} + +/// Configuration for the transport layer. +#[derive(Clone, Debug)] +pub enum TransportConfig { + /// Normal transport mode. + Normal { + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + enable_mdns: bool, + + /// If true, allow connecting to private IPv4 addresses (as defined in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have + /// been passed in [`NetworkConfiguration::boot_nodes`]. + allow_private_ipv4: bool, + }, + + /// Only allow connections within the same process. + /// Only addresses of the form `/memory/...` will be supported. + MemoryOnly, +} + +/// The policy for connections to non-reserved peers. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum NonReservedPeerMode { + /// Accept them. This is the default. + Accept, + /// Deny them. 
+ Deny, +} + +impl NonReservedPeerMode { + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(Self::Accept), + "deny" => Some(Self::Deny), + _ => None, + } + } +} + /// The configuration of a node's secret key, describing the type of key /// and how it is obtained. A node's identity keypair is the result of /// the evaluation of the node key configuration. @@ -371,7 +704,7 @@ impl NodeKeyConfig { f, |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { if s.len() == 64 { - array_bytes::hex2bytes(&s).ok() + hex::decode(&s).ok() } else { None } @@ -455,8 +788,11 @@ mod tests { } fn secret_bytes(kp: &Keypair) -> Vec { - let Keypair::Ed25519(p) = kp; - p.secret().as_ref().iter().cloned().collect() + match kp { + Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), + Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), + _ => panic!("Unexpected keypair."), + } } #[test] diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 00fc78061293d..f3d1588c0280e 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,7 +46,7 @@ //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. 
-use array_bytes::bytes2hex; +use crate::utils::LruHashSet; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; @@ -67,13 +67,12 @@ use libp2p::{ mdns::{Mdns, MdnsConfig, MdnsEvent}, multiaddr::Protocol, swarm::{ - behaviour::toggle::{Toggle, ToggleIntoConnectionHandler}, - ConnectionHandler, DialError, IntoConnectionHandler, NetworkBehaviour, - NetworkBehaviourAction, PollParameters, + handler::multi::IntoMultiHandler, ConnectionHandler, DialError, IntoConnectionHandler, + NetworkBehaviour, NetworkBehaviourAction, PollParameters, }, }; -use log::{debug, info, trace, warn}; -use sc_network_common::{config::ProtocolId, utils::LruHashSet}; +use log::{debug, error, info, trace, warn}; +use sc_network_common::config::ProtocolId; use sp_core::hexdisplay::HexDisplay; use std::{ cmp, @@ -91,8 +90,8 @@ const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; /// `DiscoveryBehaviour` configuration. /// -/// Note: In order to discover nodes or load and store values via Kademlia one has to add -/// Kademlia protocol via [`DiscoveryConfig::with_kademlia`]. +/// Note: In order to discover nodes or load and store values via Kademlia one has to add at least +/// one protocol via [`DiscoveryConfig::add_protocol`]. pub struct DiscoveryConfig { local_peer_id: PeerId, permanent_addresses: Vec<(PeerId, Multiaddr)>, @@ -102,7 +101,7 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - kademlia_protocols: Vec>, + protocol_ids: HashSet, } impl DiscoveryConfig { @@ -117,7 +116,7 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - kademlia_protocols: Vec::new(), + protocol_ids: HashSet::new(), } } @@ -162,18 +161,14 @@ impl DiscoveryConfig { } /// Add discovery via Kademlia for the given protocol. - /// - /// Currently accepts `protocol_id`. 
This should be removed once all the nodes - /// are upgraded to genesis hash- and fork ID-based Kademlia protocol name. - pub fn with_kademlia>( - &mut self, - genesis_hash: Hash, - fork_id: Option<&str>, - protocol_id: &ProtocolId, - ) -> &mut Self { - self.kademlia_protocols = Vec::new(); - self.kademlia_protocols.push(kademlia_protocol_name(genesis_hash, fork_id)); - self.kademlia_protocols.push(legacy_kademlia_protocol_name(protocol_id)); + pub fn add_protocol(&mut self, id: ProtocolId) -> &mut Self { + if self.protocol_ids.contains(&id) { + warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); + return self + } + + self.protocol_ids.insert(id); + self } @@ -195,34 +190,37 @@ impl DiscoveryConfig { discovery_only_if_under_num, enable_mdns, kademlia_disjoint_query_paths, - kademlia_protocols, + protocol_ids, } = self; - let kademlia = if !kademlia_protocols.is_empty() { - let mut config = KademliaConfig::default(); - config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); - // By default Kademlia attempts to insert all peers into its routing table once a - // dialing attempt succeeds. In order to control which peer is added, disable the - // auto-insertion and instead add peers manually. - config.set_kbucket_inserts(KademliaBucketInserts::Manual); - config.disjoint_query_paths(kademlia_disjoint_query_paths); + let kademlias = protocol_ids + .into_iter() + .map(|protocol_id| { + let proto_name = protocol_name_from_protocol_id(&protocol_id); - let store = MemoryStore::new(local_peer_id); - let mut kad = Kademlia::with_config(local_peer_id, store, config); + let mut config = KademliaConfig::default(); + config.set_protocol_name(proto_name); + // By default Kademlia attempts to insert all peers into its routing table once a + // dialing attempt succeeds. In order to control which peer is added, disable the + // auto-insertion and instead add peers manually. 
+ config.set_kbucket_inserts(KademliaBucketInserts::Manual); + config.disjoint_query_paths(kademlia_disjoint_query_paths); - for (peer_id, addr) in &permanent_addresses { - kad.add_address(peer_id, addr.clone()); - } + let store = MemoryStore::new(local_peer_id); + let mut kad = Kademlia::with_config(local_peer_id, store, config); - Some(kad) - } else { - None - }; + for (peer_id, addr) in &permanent_addresses { + kad.add_address(peer_id, addr.clone()); + } + + (protocol_id, kad) + }) + .collect(); DiscoveryBehaviour { permanent_addresses, ephemeral_addresses: HashMap::new(), - kademlia: Toggle::from(kademlia), + kademlias, next_kad_random_query: if dht_random_walk { Some(Delay::new(Duration::new(0, 0))) } else { @@ -235,15 +233,9 @@ impl DiscoveryConfig { allow_private_ipv4, discovery_only_if_under_num, mdns: if enable_mdns { - match Mdns::new(MdnsConfig::default()) { - Ok(mdns) => Some(mdns), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None - }, - } + MdnsWrapper::Instantiating(Mdns::new(MdnsConfig::default()).boxed()) } else { - None + MdnsWrapper::Disabled }, allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( @@ -262,11 +254,10 @@ pub struct DiscoveryBehaviour { /// Same as `permanent_addresses`, except that addresses that fail to reach a peer are /// removed. ephemeral_addresses: HashMap>, - /// Kademlia requests and answers. Even though it's wrapped in `Toggle`, currently - /// it's always enabled in `NetworkWorker::new()`. - kademlia: Toggle>, + /// Kademlia requests and answers. + kademlias: HashMap>, /// Discovers nodes on the local network. - mdns: Option, + mdns: MdnsWrapper, /// Stream that fires when we need to perform the next random Kademlia query. `None` if /// random walking is disabled. next_kad_random_query: Option, @@ -293,7 +284,7 @@ impl DiscoveryBehaviour { /// Returns the list of nodes that we know exist in the network. 
pub fn known_peers(&mut self) -> HashSet { let mut peers = HashSet::new(); - if let Some(k) = self.kademlia.as_mut() { + for k in self.kademlias.values_mut() { for b in k.kbuckets() { for e in b.iter() { if !peers.contains(e.node.key.preimage()) { @@ -313,7 +304,7 @@ impl DiscoveryBehaviour { pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { let addrs_list = self.ephemeral_addresses.entry(peer_id).or_default(); if !addrs_list.iter().any(|a| *a == addr) { - if let Some(k) = self.kademlia.as_mut() { + for k in self.kademlias.values_mut() { k.add_address(&peer_id, addr.clone()); } @@ -322,51 +313,51 @@ impl DiscoveryBehaviour { } } - /// Add a self-reported address of a remote peer to the k-buckets of the DHT - /// if it has compatible `supported_protocols`. + /// Add a self-reported address of a remote peer to the k-buckets of the supported + /// DHTs (`supported_protocols`). /// /// **Note**: It is important that you call this method. The discovery mechanism will not /// automatically add connecting peers to the Kademlia k-buckets. 
pub fn add_self_reported_address( &mut self, peer_id: &PeerId, - supported_protocols: &[impl AsRef<[u8]>], + supported_protocols: impl Iterator>, addr: Multiaddr, ) { - if let Some(kademlia) = self.kademlia.as_mut() { - if !self.allow_non_globals_in_dht && !Self::can_add_to_dht(&addr) { - trace!( - target: "sub-libp2p", - "Ignoring self-reported non-global address {} from {}.", addr, peer_id - ); - return - } + if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { + trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); + return + } - if let Some(matching_protocol) = supported_protocols - .iter() - .find(|p| kademlia.protocol_names().iter().any(|k| k.as_ref() == p.as_ref())) - { - trace!( - target: "sub-libp2p", - "Adding self-reported address {} from {} to Kademlia DHT {}.", - addr, peer_id, String::from_utf8_lossy(matching_protocol.as_ref()), - ); - kademlia.add_address(peer_id, addr.clone()); - } else { - trace!( - target: "sub-libp2p", - "Ignoring self-reported address {} from {} as remote node is not part of the \ - Kademlia DHT supported by the local node.", addr, peer_id, - ); + let mut added = false; + for protocol in supported_protocols { + for kademlia in self.kademlias.values_mut() { + if protocol.as_ref() == kademlia.protocol_name() { + trace!( + target: "sub-libp2p", + "Adding self-reported address {} from {} to Kademlia DHT {}.", + addr, peer_id, String::from_utf8_lossy(kademlia.protocol_name()), + ); + kademlia.add_address(peer_id, addr.clone()); + added = true; + } } } + + if !added { + trace!( + target: "sub-libp2p", + "Ignoring self-reported address {} from {} as remote node is not part of any \ + Kademlia DHTs supported by the local node.", addr, peer_id, + ); + } } /// Start fetching a record from the DHT. /// /// A corresponding `ValueFound` or `ValueNotFound` event will later be generated. 
pub fn get_value(&mut self, key: record::Key) { - if let Some(k) = self.kademlia.as_mut() { + for k in self.kademlias.values_mut() { k.get_record(key.clone(), Quorum::One); } } @@ -376,7 +367,7 @@ impl DiscoveryBehaviour { /// /// A corresponding `ValuePut` or `ValuePutFailed` event will later be generated. pub fn put_value(&mut self, key: record::Key, value: Vec) { - if let Some(k) = self.kademlia.as_mut() { + for k in self.kademlias.values_mut() { if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) { warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); self.pending_events @@ -389,27 +380,37 @@ impl DiscoveryBehaviour { /// /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm /// of their lower bound. - pub fn num_entries_per_kbucket(&mut self) -> Option> { - self.kademlia.as_mut().map(|kad| { - kad.kbuckets() + pub fn num_entries_per_kbucket( + &mut self, + ) -> impl ExactSizeIterator)> { + self.kademlias.iter_mut().map(|(id, kad)| { + let buckets = kad + .kbuckets() .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) - .collect() + .collect(); + (id, buckets) }) } /// Returns the number of records in the Kademlia record stores. - pub fn num_kademlia_records(&mut self) -> Option { + pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { // Note that this code is ok only because we use a `MemoryStore`. - self.kademlia.as_mut().map(|kad| kad.store_mut().records().count()) + self.kademlias.iter_mut().map(|(id, kad)| { + let num = kad.store_mut().records().count(); + (id, num) + }) } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> Option { + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { // Note that this code is ok only because we use a `MemoryStore`. 
If the records were // for example stored on disk, this would load every single one of them every single time. - self.kademlia - .as_mut() - .map(|kad| kad.store_mut().records().fold(0, |tot, rec| tot + rec.value.len())) + self.kademlias.iter_mut().map(|(id, kad)| { + let size = kad.store_mut().records().fold(0, |tot, rec| tot + rec.value.len()); + (id, size) + }) } /// Can the given `Multiaddr` be put into the DHT? @@ -418,7 +419,7 @@ impl DiscoveryBehaviour { // NB: Currently all DNS names are allowed and no check for TLD suffixes is done // because the set of valid domains is highly dynamic and would require frequent // updates, for example by utilising publicsuffix.org or IANA. - pub fn can_add_to_dht(addr: &Multiaddr) -> bool { + pub fn can_add_to_dht(&self, addr: &Multiaddr) -> bool { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), @@ -428,6 +429,29 @@ impl DiscoveryBehaviour { }; ip.is_global() } + + fn new_handler_with_replacement( + &mut self, + pid: ProtocolId, + handler: KademliaHandlerProto, + ) -> ::ConnectionHandler { + let mut handlers: HashMap<_, _> = self + .kademlias + .iter_mut() + .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))) + .collect(); + + if let Some(h) = handlers.get_mut(&pid) { + *h = handler + } + + IntoMultiHandler::try_from_iter(handlers).expect( + "There can be at most one handler per `ProtocolId` and protocol names contain the \ + `ProtocolId` so no two protocol names in `self.kademlias` can be equal which is the \ + only error `try_from_iter` can return, therefore this call is guaranteed to succeed; \ + qed", + ) + } } /// Event generated by the `DiscoveryBehaviour`. @@ -468,18 +492,28 @@ pub enum DiscoveryOut { /// Returning the corresponding key as well as the request duration. ValuePutFailed(record::Key, Duration), - /// Started a random Kademlia query. 
+ /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. /// /// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`. - RandomKademliaStarted, + RandomKademliaStarted(Vec), } impl NetworkBehaviour for DiscoveryBehaviour { - type ConnectionHandler = ToggleIntoConnectionHandler>; + type ConnectionHandler = IntoMultiHandler>; type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ConnectionHandler { - self.kademlia.new_handler() + let iter = self + .kademlias + .iter_mut() + .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); + + IntoMultiHandler::try_from_iter(iter).expect( + "There can be at most one handler per `ProtocolId` and protocol names contain the \ + `ProtocolId` so no two protocol names in `self.kademlias` can be equal which is the \ + only error `try_from_iter` can return, therefore this call is guaranteed to succeed; \ + qed", + ) } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { @@ -494,12 +528,13 @@ impl NetworkBehaviour for DiscoveryBehaviour { } { - let mut list_to_filter = self.kademlia.addresses_of_peer(peer_id); - - if let Some(ref mut mdns) = self.mdns { - list_to_filter.extend(mdns.addresses_of_peer(peer_id)); + let mut list_to_filter = Vec::new(); + for k in self.kademlias.values_mut() { + list_to_filter.extend(k.addresses_of_peer(peer_id)) } + list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); + if !self.allow_private_ipv4 { list_to_filter.retain(|addr| match addr.iter().next() { Some(Protocol::Ip4(addr)) if !IpNetwork::from(addr).is_global() => false, @@ -523,7 +558,9 @@ impl NetworkBehaviour for DiscoveryBehaviour { old: &ConnectedPoint, new: &ConnectedPoint, ) { - self.kademlia.inject_address_change(peer_id, connection_id, old, new) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_address_change(k, peer_id, connection_id, old, new); + } } fn inject_connection_established( @@ -535,13 +572,16 @@ impl 
NetworkBehaviour for DiscoveryBehaviour { other_established: usize, ) { self.num_connections += 1; - self.kademlia.inject_connection_established( - peer_id, - conn, - endpoint, - failed_addresses, - other_established, - ) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connection_established( + k, + peer_id, + conn, + endpoint, + failed_addresses, + other_established, + ) + } } fn inject_connection_closed( @@ -553,19 +593,23 @@ impl NetworkBehaviour for DiscoveryBehaviour { remaining_established: usize, ) { self.num_connections -= 1; - self.kademlia.inject_connection_closed( - peer_id, - conn, - endpoint, - handler, - remaining_established, - ) + for (pid, event) in handler.into_iter() { + if let Some(kad) = self.kademlias.get_mut(&pid) { + kad.inject_connection_closed(peer_id, conn, endpoint, event, remaining_established) + } else { + error!( + target: "sub-libp2p", + "inject_connection_closed: no kademlia instance registered for protocol {:?}", + pid, + ) + } + } } fn inject_dial_failure( &mut self, peer_id: Option, - handler: Self::ConnectionHandler, + _: Self::ConnectionHandler, error: &DialError, ) { if let Some(peer_id) = peer_id { @@ -578,22 +622,32 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - self.kademlia.inject_dial_failure(peer_id, handler, error) + for k in self.kademlias.values_mut() { + let handler = k.new_handler(); + NetworkBehaviour::inject_dial_failure(k, peer_id, handler, error); + } } fn inject_event( &mut self, peer_id: PeerId, connection: ConnectionId, - event: <::Handler as ConnectionHandler>::OutEvent, + (pid, event): <::Handler as ConnectionHandler>::OutEvent, ) { - self.kademlia.inject_event(peer_id, connection, event) + if let Some(kad) = self.kademlias.get_mut(&pid) { + return kad.inject_event(peer_id, connection, event) + } + error!( + target: "sub-libp2p", + "inject_node_event: no kademlia instance registered for protocol {:?}", + pid, + ) } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { let 
new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.into())); - if Self::can_add_to_dht(addr) { + if self.can_add_to_dht(addr) { // NOTE: we might re-discover the same address multiple times // in which case we just want to refrain from logging. if self.known_external_addresses.insert(new_addr.clone()) { @@ -605,26 +659,36 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - self.kademlia.inject_new_external_addr(addr) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_external_addr(k, addr) + } } fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { // We intentionally don't remove the element from `known_external_addresses` in order // to not print the log line again. - self.kademlia.inject_expired_external_addr(addr) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_expired_external_addr(k, addr) + } } fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { - self.kademlia.inject_expired_listen_addr(id, addr) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(k, id, addr) + } } fn inject_new_listener(&mut self, id: ListenerId) { - self.kademlia.inject_new_listener(id) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_listener(k, id) + } } fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { - self.kademlia.inject_new_listen_addr(id, addr) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_listen_addr(k, id, addr) + } } fn inject_listen_failure(&mut self, _: &Multiaddr, _: &Multiaddr, _: Self::ConnectionHandler) { @@ -632,11 +696,15 @@ impl NetworkBehaviour for DiscoveryBehaviour { } fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { - self.kademlia.inject_listener_error(id, err) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_listener_error(k, id, err) + } } fn inject_listener_closed(&mut self, id: ListenerId, reason: 
Result<(), &io::Error>) { - self.kademlia.inject_listener_closed(id, reason) + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_listener_closed(k, id, reason) + } } fn poll( @@ -650,214 +718,187 @@ impl NetworkBehaviour for DiscoveryBehaviour { } // Poll the stream that fires when we need to start a random Kademlia query. - if let Some(kademlia) = self.kademlia.as_mut() { - if let Some(next_kad_random_query) = self.next_kad_random_query.as_mut() { - while next_kad_random_query.poll_unpin(cx).is_ready() { - let actually_started = - if self.num_connections < self.discovery_only_if_under_num { - let random_peer_id = PeerId::random(); - debug!( - target: "sub-libp2p", - "Libp2p <= Starting random Kademlia request for {:?}", - random_peer_id, - ); - kademlia.get_closest_peers(random_peer_id); - true - } else { - debug!( - target: "sub-libp2p", - "Kademlia paused due to high number of connections ({})", - self.num_connections - ); - false - }; - - // Schedule the next random query with exponentially increasing delay, - // capped at 60 seconds. 
- *next_kad_random_query = Delay::new(self.duration_to_next_kad); - self.duration_to_next_kad = - cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); - - if actually_started { - let ev = DiscoveryOut::RandomKademliaStarted; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + if let Some(next_kad_random_query) = self.next_kad_random_query.as_mut() { + while next_kad_random_query.poll_unpin(cx).is_ready() { + let actually_started = if self.num_connections < self.discovery_only_if_under_num { + let random_peer_id = PeerId::random(); + debug!( + target: "sub-libp2p", + "Libp2p <= Starting random Kademlia request for {:?}", + random_peer_id, + ); + for k in self.kademlias.values_mut() { + k.get_closest_peers(random_peer_id); } + true + } else { + debug!( + target: "sub-libp2p", + "Kademlia paused due to high number of connections ({})", + self.num_connections + ); + false + }; + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. + *next_kad_random_query = Delay::new(self.duration_to_next_kad); + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + + if actually_started { + let ev = DiscoveryOut::RandomKademliaStarted( + self.kademlias.keys().cloned().collect(), + ); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } } } - while let Poll::Ready(ev) = self.kademlia.poll(cx, params) { - match ev { - NetworkBehaviourAction::GenerateEvent(ev) => match ev { - KademliaEvent::RoutingUpdated { peer, .. } => { - let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::UnroutablePeer { peer, .. } => { - let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::RoutablePeer { peer, .. 
} => { - let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::PendingRoutablePeer { .. } | - KademliaEvent::InboundRequest { .. } => { - // We are not interested in this event at the moment. - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::GetClosestPeers(res), - .. - } => match res { - Err(GetClosestPeersError::Timeout { key, peers }) => { - debug!( - target: "sub-libp2p", - "Libp2p => Query for {:?} timed out with {} results", - HexDisplay::from(&key), peers.len(), - ); + // Poll Kademlias. + for (pid, kademlia) in &mut self.kademlias { + while let Poll::Ready(ev) = kademlia.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(ev) => match ev { + KademliaEvent::RoutingUpdated { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, - Ok(ok) => { - trace!( - target: "sub-libp2p", - "Libp2p => Query for {:?} yielded {:?} results", - HexDisplay::from(&ok.key), ok.peers.len(), - ); - if ok.peers.is_empty() && self.num_connections != 0 { + KademliaEvent::UnroutablePeer { peer, .. } => { + let ev = DiscoveryOut::UnroutablePeer(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::RoutablePeer { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::PendingRoutablePeer { .. } | + KademliaEvent::InboundRequest { .. } => { + // We are not interested in this event at the moment. + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::GetClosestPeers(res), + .. 
+ } => match res { + Err(GetClosestPeersError::Timeout { key, peers }) => { debug!( target: "sub-libp2p", - "Libp2p => Random Kademlia query has yielded empty results", + "Libp2p => Query for {:?} timed out with {} results", + HexDisplay::from(&key), peers.len(), ); - } - }, - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::GetRecord(res), - stats, - .. - } => { - let ev = match res { - Ok(ok) => { - let results = ok - .records - .into_iter() - .map(|r| (r.record.key, r.record.value)) - .collect(); - - DiscoveryOut::ValueFound( - results, - stats.duration().unwrap_or_default(), - ) }, - Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { + Ok(ok) => { trace!( target: "sub-libp2p", - "Libp2p => Failed to get record: {:?}", - e, - ); - DiscoveryOut::ValueNotFound( - e.into_key(), - stats.duration().unwrap_or_default(), - ) - }, - Err(e) => { - debug!( - target: "sub-libp2p", - "Libp2p => Failed to get record: {:?}", - e, + "Libp2p => Query for {:?} yielded {:?} results", + HexDisplay::from(&ok.key), ok.peers.len(), ); - DiscoveryOut::ValueNotFound( - e.into_key(), - stats.duration().unwrap_or_default(), - ) + if ok.peers.is_empty() && self.num_connections != 0 { + debug!( + target: "sub-libp2p", + "Libp2p => Random Kademlia query has yielded empty results", + ); + } }, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::PutRecord(res), - stats, - .. - } => { - let ev = match res { - Ok(ok) => - DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()), - Err(e) => { - debug!( - target: "sub-libp2p", - "Libp2p => Failed to put record: {:?}", - e, - ); - DiscoveryOut::ValuePutFailed( - e.into_key(), + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::GetRecord(res), + stats, + .. 
+ } => { + let ev = match res { + Ok(ok) => { + let results = ok + .records + .into_iter() + .map(|r| (r.record.key, r.record.value)) + .collect(); + + DiscoveryOut::ValueFound( + results, + stats.duration().unwrap_or_default(), + ) + }, + Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { + trace!( + target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", + e, + ); + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_default(), + ) + }, + Err(e) => { + debug!( + target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", + e, + ); + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_default(), + ) + }, + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::PutRecord(res), + stats, + .. + } => { + let ev = match res { + Ok(ok) => DiscoveryOut::ValuePut( + ok.key, stats.duration().unwrap_or_default(), - ) - }, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::RepublishRecord(res), - .. - } => match res { - Ok(ok) => debug!( - target: "sub-libp2p", - "Libp2p => Record republished: {:?}", - ok.key, - ), - Err(e) => debug!( - target: "sub-libp2p", - "Libp2p => Republishing of record {:?} failed with: {:?}", - e.key(), e, - ), - }, - // We never start any other type of query. - KademliaEvent::OutboundQueryCompleted { result: e, .. 
} => { - warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) - }, - }, - NetworkBehaviourAction::Dial { opts, handler } => - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }), - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event, - }), - NetworkBehaviourAction::ReportObservedAddr { address, score } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, - score, - }), - NetworkBehaviourAction::CloseConnection { peer_id, connection } => - return Poll::Ready(NetworkBehaviourAction::CloseConnection { - peer_id, - connection, - }), - } - } - - // Poll mDNS. - if let Some(ref mut mdns) = self.mdns { - while let Poll::Ready(ev) = mdns.poll(cx, params) { - match ev { - NetworkBehaviourAction::GenerateEvent(event) => match event { - MdnsEvent::Discovered(list) => { - if self.num_connections >= self.discovery_only_if_under_num { - continue - } - - self.pending_events - .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - } + ), + Err(e) => { + debug!( + target: "sub-libp2p", + "Libp2p => Failed to put record: {:?}", + e, + ); + DiscoveryOut::ValuePutFailed( + e.into_key(), + stats.duration().unwrap_or_default(), + ) + }, + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::RepublishRecord(res), + .. + } => match res { + Ok(ok) => debug!( + target: "sub-libp2p", + "Libp2p => Record republished: {:?}", + ok.key, + ), + Err(e) => debug!( + target: "sub-libp2p", + "Libp2p => Republishing of record {:?} failed with: {:?}", + e.key(), e, + ), + }, + // We never start any other type of query. + KademliaEvent::OutboundQueryCompleted { result: e, .. 
} => { + warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, - MdnsEvent::Expired(_) => {}, }, - NetworkBehaviourAction::Dial { .. } => { - unreachable!("mDNS never dials!"); + NetworkBehaviourAction::Dial { opts, handler } => { + let pid = pid.clone(); + let handler = self.new_handler_with_replacement(pid, handler); + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) }, - NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: (pid.clone(), event), + }), NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, @@ -872,54 +913,112 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } + // Poll mDNS. + while let Poll::Ready(ev) = self.mdns.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(event) => match event { + MdnsEvent::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue + } + + self.pending_events + .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + } + }, + MdnsEvent::Expired(_) => {}, + }, + NetworkBehaviourAction::Dial { .. } => { + unreachable!("mDNS never dials!"); + }, + NetworkBehaviourAction::NotifyHandler { event, .. 
} => match event {}, /* `event` is an enum with no variant */ + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, + }), + } + } + Poll::Pending } } -/// Legacy (fallback) Kademlia protocol name based on `protocol_id`. -fn legacy_kademlia_protocol_name(id: &ProtocolId) -> Vec { +// NB: If this protocol name derivation is changed, check if +// `DiscoveryBehaviour::new_handler` is still correct. +fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { let mut v = vec![b'/']; v.extend_from_slice(id.as_ref().as_bytes()); v.extend_from_slice(b"/kad"); v } -/// Kademlia protocol name based on `genesis_hash` and `fork_id`. -fn kademlia_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> Vec { - let genesis_hash_hex = bytes2hex("", genesis_hash.as_ref()); - if let Some(fork_id) = fork_id { - format!("/{}/{}/kad", genesis_hash_hex, fork_id).as_bytes().into() - } else { - format!("/{}/kad", genesis_hash_hex).as_bytes().into() +/// [`Mdns::new`] returns a future. Instead of forcing [`DiscoveryConfig::finish`] and all its +/// callers to be async, lazily instantiate [`Mdns`]. 
+enum MdnsWrapper { + Instantiating(futures::future::BoxFuture<'static, std::io::Result>), + Ready(Mdns), + Disabled, +} + +impl MdnsWrapper { + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + match self { + Self::Instantiating(_) => Vec::new(), + Self::Ready(mdns) => mdns.addresses_of_peer(peer_id), + Self::Disabled => Vec::new(), + } + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + params: &mut impl PollParameters, + ) -> Poll::ConnectionHandler>> { + loop { + match self { + Self::Instantiating(fut) => + *self = match futures::ready!(fut.as_mut().poll(cx)) { + Ok(mdns) => Self::Ready(mdns), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + Self::Disabled + }, + }, + Self::Ready(mdns) => return mdns.poll(cx, params), + Self::Disabled => return Poll::Pending, + } + } } } #[cfg(test)] mod tests { - use super::{ - kademlia_protocol_name, legacy_kademlia_protocol_name, DiscoveryConfig, DiscoveryOut, - }; + use super::{protocol_name_from_protocol_id, DiscoveryConfig, DiscoveryOut}; use futures::prelude::*; use libp2p::{ core::{ transport::{MemoryTransport, Transport}, upgrade, }, - identity::{ed25519, Keypair}, + identity::Keypair, noise, swarm::{Swarm, SwarmEvent}, - yamux, Multiaddr, + yamux, Multiaddr, PeerId, }; use sc_network_common::config::ProtocolId; - use sp_core::hash::H256; use std::{collections::HashSet, task::Poll}; #[test] fn discovery_working() { let mut first_swarm_peer_id_and_addr = None; - - let genesis_hash = H256::from_low_u64_be(1); - let fork_id = Some("test-fork-id"); let protocol_id = ProtocolId::from("dot"); // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of @@ -944,7 +1043,7 @@ mod tests { .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .with_kademlia(genesis_hash, fork_id, &protocol_id); + .add_protocol(protocol_id.clone()); config.finish() }; @@ -970,7 +1069,7 @@ mod tests { // Skip the first swarm as all other swarms already know 
it. .skip(1) .filter(|p| *p != n) - .map(|p| *Swarm::local_peer_id(&swarms[p].0)) + .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) .collect::>() }) .collect::>(); @@ -997,25 +1096,19 @@ mod tests { } }) .unwrap(); - // Test both genesis hash-based and legacy - // protocol names. - let protocol_name = if swarm_n % 2 == 0 { - kademlia_protocol_name(genesis_hash, fork_id) - } else { - legacy_kademlia_protocol_name(&protocol_id) - }; swarms[swarm_n] .0 .behaviour_mut() .add_self_reported_address( &other, - &[protocol_name], + [protocol_name_from_protocol_id(&protocol_id)] + .iter(), addr, ); to_discover[swarm_n].remove(&other); }, - DiscoveryOut::RandomKademliaStarted => {}, + DiscoveryOut::RandomKademliaStarted(_) => {}, e => { panic!("Unexpected event: {:?}", e) }, @@ -1044,8 +1137,6 @@ mod tests { #[test] fn discovery_ignores_peers_with_unknown_protocols() { - let supported_genesis_hash = H256::from_low_u64_be(1); - let unsupported_genesis_hash = H256::from_low_u64_be(2); let supported_protocol_id = ProtocolId::from("a"); let unsupported_protocol_id = ProtocolId::from("b"); @@ -1056,37 +1147,21 @@ mod tests { .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .with_kademlia(supported_genesis_hash, None, &supported_protocol_id); + .add_protocol(supported_protocol_id.clone()); config.finish() }; - let predictable_peer_id = |bytes: &[u8; 32]| { - Keypair::Ed25519(ed25519::Keypair::from( - ed25519::SecretKey::from_bytes(bytes.to_owned()).unwrap(), - )) - .public() - .to_peer_id() - }; - - let remote_peer_id = predictable_peer_id(b"00000000000000000000000000000001"); - let remote_addr: Multiaddr = "/memory/1".parse().unwrap(); - let another_peer_id = predictable_peer_id(b"00000000000000000000000000000002"); - let another_addr: Multiaddr = "/memory/2".parse().unwrap(); + let remote_peer_id = PeerId::random(); + let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - // Try adding remote peers with 
unsupported protocols. + // Add remote peer with unsupported protocol. discovery.add_self_reported_address( &remote_peer_id, - &[kademlia_protocol_name(unsupported_genesis_hash, None)], + [protocol_name_from_protocol_id(&unsupported_protocol_id)].iter(), remote_addr.clone(), ); - discovery.add_self_reported_address( - &another_peer_id, - &[legacy_kademlia_protocol_name(&unsupported_protocol_id)], - another_addr.clone(), - ); - { - let kademlia = discovery.kademlia.as_mut().unwrap(); + for kademlia in discovery.kademlias.values_mut() { assert!( kademlia .kbucket(remote_peer_id) @@ -1094,34 +1169,75 @@ mod tests { .is_empty(), "Expect peer with unsupported protocol not to be added." ); - assert!( + } + + // Add remote peer with supported protocol. + discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&supported_protocol_id)].iter(), + remote_addr.clone(), + ); + + for kademlia in discovery.kademlias.values_mut() { + assert_eq!( + 1, kademlia - .kbucket(another_peer_id) + .kbucket(remote_peer_id) .expect("Remote peer id not to be equal to local peer id.") - .is_empty(), - "Expect peer with unsupported protocol not to be added." + .num_entries(), + "Expect peer with supported protocol to be added." ); } + } + + #[test] + fn discovery_adds_peer_to_kademlia_of_same_protocol_only() { + let protocol_a = ProtocolId::from("a"); + let protocol_b = ProtocolId::from("b"); + + let mut discovery = { + let keypair = Keypair::generate_ed25519(); + let mut config = DiscoveryConfig::new(keypair.public()); + config + .allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(protocol_a.clone()) + .add_protocol(protocol_b.clone()); + config.finish() + }; + + let remote_peer_id = PeerId::random(); + let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - // Add remote peers with supported protocols. + // Add remote peer with `protocol_a` only. 
discovery.add_self_reported_address( &remote_peer_id, - &[kademlia_protocol_name(supported_genesis_hash, None)], + [protocol_name_from_protocol_id(&protocol_a)].iter(), remote_addr.clone(), ); - discovery.add_self_reported_address( - &another_peer_id, - &[legacy_kademlia_protocol_name(&supported_protocol_id)], - another_addr.clone(), + + assert_eq!( + 1, + discovery + .kademlias + .get_mut(&protocol_a) + .expect("Kademlia instance to exist.") + .kbucket(remote_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .num_entries(), + "Expected remote peer to be added to `protocol_a` Kademlia instance.", ); - { - let kademlia = discovery.kademlia.as_mut().unwrap(); - assert_eq!( - 2, - kademlia.kbuckets().fold(0, |acc, bucket| acc + bucket.num_entries()), - "Expect peers with supported protocol to be added." - ); - } + assert!( + discovery + .kademlias + .get_mut(&protocol_b) + .expect("Kademlia instance to exist.") + .kbucket(remote_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expected remote peer not to be added to `protocol_b` Kademlia instance.", + ); } } diff --git a/client/network/common/src/error.rs b/client/network/src/error.rs similarity index 96% rename from client/network/common/src/error.rs rename to client/network/src/error.rs index 4326b1af52836..716235193a80f 100644 --- a/client/network/common/src/error.rs +++ b/client/network/src/error.rs @@ -18,10 +18,10 @@ //! Substrate network possible errors. -use crate::{config::TransportConfig, protocol::ProtocolName}; +use crate::config::TransportConfig; use libp2p::{Multiaddr, PeerId}; -use std::fmt; +use std::{borrow::Cow, fmt}; /// Result type alias for the network. pub type Result = std::result::Result; @@ -65,7 +65,7 @@ pub enum Error { #[error("Request-response protocol registered multiple times: {protocol}")] DuplicateRequestResponseProtocol { /// Name of the protocol registered multiple times. 
- protocol: ProtocolName, + protocol: Cow<'static, str>, }, } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index f3faa44ee6dbd..83bc1075b8bad 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -249,39 +249,35 @@ mod discovery; mod peer_info; mod protocol; mod request_responses; +mod schema; mod service; mod transport; +mod utils; +pub mod bitswap; pub mod config; +pub mod error; pub mod network_state; +pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -pub use protocol::PeerInfo; -pub use sc_network_common::{ - protocol::{ - event::{DhtEvent, Event}, - role::ObservedRole, - ProtocolName, - }, - request_responses::{IfDisconnected, RequestFailure}, - service::{ - KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, - NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, Signature, - SigningError, - }, - sync::{ - warp::{WarpSyncPhase, WarpSyncProgress}, - StateDownloadProgress, SyncState, - }, +pub use protocol::{ + event::{DhtEvent, Event, ObservedRole}, + PeerInfo, +}; +pub use sc_network_common::sync::{ + warp::{WarpSyncPhase, WarpSyncProgress}, + StateDownloadProgress, SyncState, }; pub use service::{ - DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, - NotificationSenderReady, OutboundFailure, PublicKey, + DecodingError, IfDisconnected, KademliaKey, Keypair, NetworkService, NetworkWorker, + NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, RequestFailure, + Signature, SigningError, }; -use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sc_peerset::ReputationChange; +use sp_runtime::traits::{Block as BlockT, NumberFor}; /// The maximum allowed number of established connections per peer. /// @@ -295,13 +291,40 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. 
const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; -/// Abstraction over syncing-related services -pub trait ChainSyncInterface: - NetworkSyncForkRequest> + Send + Sync -{ +/// Minimum Requirements for a Hash within Networking +pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} + +impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static +{} + +/// Trait for providing information about the local network state +pub trait NetworkStateInfo { + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec; + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId; } -impl ChainSyncInterface for T where - T: NetworkSyncForkRequest> + Send + Sync -{ +/// Overview status of the network. +#[derive(Clone)] +pub struct NetworkStatus { + /// Current global sync state. + pub sync_state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_sync_peers: u32, + /// Total number of connected peers + pub num_connected_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// The total number of bytes received. + pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, + /// State sync in progress. + pub state_sync: Option, + /// Warp sync in progress. + pub warp_sync: Option>, } diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index e04d006f50501..d668cb25ea455 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ @@ -23,11 +24,8 @@ use libp2p::{ connection::ConnectionId, either::EitherOutput, transport::ListenerId, ConnectedPoint, PeerId, PublicKey, }, - identify::{ - Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent, - Info as IdentifyInfo, - }, - ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent, Success as PingSuccess}, + identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}, + ping::{Ping, PingConfig, PingEvent, PingSuccess}, swarm::{ ConnectionHandler, IntoConnectionHandler, IntoConnectionHandlerSelect, NetworkBehaviour, NetworkBehaviourAction, PollParameters, @@ -35,7 +33,6 @@ use libp2p::{ Multiaddr, }; use log::{debug, error, trace}; -use sc_network_common::utils::interval; use smallvec::SmallVec; use std::{ collections::hash_map::Entry, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 63d060f423773..1c933fabcbb5d 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -16,7 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::config; +use crate::{ + config, error, + request_responses::RequestFailure, + utils::{interval, LruHashSet}, +}; use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; @@ -31,32 +35,28 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, info, log, trace, warn, Level}; -use lru::LruCache; -use message::{generic::Message as GenericMessage, Message}; +use message::{ + generic::{Message as GenericMessage, Roles}, + Message, +}; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; -use sc_client_api::HeaderBackend; -use sc_consensus::import_queue::{ - BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, -}; +use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; +use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sc_network_common::{ - config::NonReservedPeerMode, - error, - protocol::{role::Roles, ProtocolName}, - request_responses::RequestFailure, + config::ProtocolId, sync::{ message::{ - BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, - BlockResponse, BlockState, + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, }, warp::{EncodedProof, WarpProofRequest}, BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PollBlockAnnounceValidation, SyncStatus, }, - utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; +use sp_blockchain::HeaderMetadata; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, @@ -64,6 +64,7 @@ use sp_runtime::{ Justifications, }; use std::{ + borrow::Cow, collections::{HashMap, HashSet, VecDeque}, io, iter, num::NonZeroUsize, @@ -75,6 +76,7 @@ use std::{ mod notifications; +pub mod event; pub mod message; pub use notifications::{NotificationsSink, 
NotifsHandlerError, Ready}; @@ -84,6 +86,8 @@ const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); /// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead +/// Maximum allowed size for a block announce. +const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; /// Maximum size used for notifications in the block announce and transaction protocols. // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. @@ -188,7 +192,7 @@ pub struct Protocol { /// Handles opening the unique substream and sending and receiving raw messages. behaviour: Notifications, /// List of notifications protocols that have been registered. - notification_protocols: Vec, + notification_protocols: Vec>, /// If we receive a new "substream open" event that contains an invalid handshake, we ask the /// inner layer to force-close the substream. Force-closing the substream will generate a /// "substream closed" event. This is a problem: since we can't propagate the "substream open" @@ -201,7 +205,7 @@ pub struct Protocol { /// The `PeerId`'s of all boot nodes. boot_node_ids: HashSet, /// A cache for the data that was associated to a block announcement. - block_announce_data_cache: LruCache>, + block_announce_data_cache: lru::LruCache>, } #[derive(Debug)] @@ -232,19 +236,50 @@ pub struct PeerInfo { pub best_number: ::Number, } +/// Handshake sent when we open a block announces substream. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +struct BlockAnnouncesHandshake { + /// Roles of the node. + roles: Roles, + /// Best block number. + best_number: NumberFor, + /// Best block hash. + best_hash: B::Hash, + /// Genesis block hash. 
+ genesis_hash: B::Hash, +} + +impl BlockAnnouncesHandshake { + fn build( + roles: Roles, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> Self { + Self { genesis_hash, roles, best_number, best_hash } + } +} + impl Protocol where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Create a new instance. pub fn new( roles: Roles, chain: Arc, + protocol_id: ProtocolId, network_config: &config::NetworkConfiguration, + notifications_protocols_handshakes: Vec>, metrics_registry: Option<&Registry>, chain_sync: Box>, - block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); @@ -311,7 +346,7 @@ where bootnodes, reserved_nodes: default_sets_reserved.clone(), reserved_only: network_config.default_peers_set.non_reserved_mode == - NonReservedPeerMode::Deny, + config::NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -322,7 +357,7 @@ where } let reserved_only = - set_cfg.set_config.non_reserved_mode == NonReservedPeerMode::Deny; + set_cfg.set_config.non_reserved_mode == config::NonReservedPeerMode::Deny; sets.push(sc_peerset::SetConfig { in_peers: set_cfg.set_config.in_peers, @@ -336,34 +371,44 @@ where sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; + let block_announces_protocol: Cow<'static, str> = + format!("/{}/block-announces/1", protocol_id.as_ref()).into(); + let behaviour = { + let best_number = info.best_number; + let best_hash = info.best_hash; + let genesis_hash = info.genesis_hash; + + let block_announces_handshake = + BlockAnnouncesHandshake::::build(roles, best_number, best_hash, genesis_hash) + .encode(); + + let sync_protocol_config = notifications::ProtocolConfig { + name: block_announces_protocol, + fallback_names: Vec::new(), + handshake: 
block_announces_handshake, + max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, + }; + Notifications::new( peerset, - // NOTE: Block announcement protocol is still very much hardcoded into `Protocol`. - // This protocol must be the first notification protocol given to - // `Notifications` - iter::once(notifications::ProtocolConfig { - name: block_announces_protocol.notifications_protocol.clone(), - fallback_names: block_announces_protocol.fallback_names.clone(), - handshake: block_announces_protocol.handshake.as_ref().unwrap().to_vec(), - max_notification_size: block_announces_protocol.max_notification_size, - }) - .chain(network_config.extra_sets.iter().map(|s| notifications::ProtocolConfig { - name: s.notifications_protocol.clone(), - fallback_names: s.fallback_names.clone(), - handshake: s.handshake.as_ref().map_or(roles.encode(), |h| (*h).to_vec()), - max_notification_size: s.max_notification_size, - })), + iter::once(sync_protocol_config).chain( + network_config.extra_sets.iter().zip(notifications_protocols_handshakes).map( + |(s, hs)| notifications::ProtocolConfig { + name: s.notifications_protocol.clone(), + fallback_names: s.fallback_names.clone(), + handshake: hs, + max_notification_size: s.max_notification_size, + }, + ), + ), ) }; - let cache_capacity = NonZeroUsize::new( - (network_config.default_peers_set.in_peers as usize + - network_config.default_peers_set.out_peers as usize) - .max(1), - ) - .expect("cache capacity is not zero"); - let block_announce_data_cache = LruCache::new(cache_capacity); + let block_announce_data_cache = lru::LruCache::new( + network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize, + ); let protocol = Self { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), @@ -384,8 +429,10 @@ where }, peerset_handle: peerset_handle.clone(), behaviour, - notification_protocols: iter::once(block_announces_protocol.notifications_protocol) - .chain(network_config.extra_sets.iter().map(|s| 
s.notifications_protocol.clone())) + notification_protocols: network_config + .extra_sets + .iter() + .map(|s| s.notifications_protocol.clone()) .collect(), bad_handshake_substreams: Default::default(), metrics: if let Some(r) = metrics_registry { @@ -411,10 +458,13 @@ where } /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { + pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: &str) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position)); + self.behaviour.disconnect_peer( + peer_id, + sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS), + ); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } @@ -575,7 +625,6 @@ where CustomMessageOutcome::BlockImport(origin, blocks), Ok(OnBlockData::Request(peer, req)) => prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), - Ok(OnBlockData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -923,7 +972,6 @@ where CustomMessageOutcome::BlockImport(origin, blocks), Ok(OnBlockData::Request(peer, req)) => prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), - Ok(OnBlockData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -951,6 +999,18 @@ where self.chain_sync.clear_justification_requests(); } + /// Request syncing for the given block from given set of peers. + /// Uses `protocol` to queue a new block download request and tries to dispatch all pending + /// requests. 
+ pub fn set_sync_fork_request( + &mut self, + peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + self.chain_sync.set_sync_fork_request(peers, hash, number) + } + /// A batch of blocks have been processed, with or without errors. /// Call this when a batch of blocks have been processed by the importqueue, with or without /// errors. @@ -1023,9 +1083,10 @@ where } /// Sets the list of reserved peers for the given protocol/peerset. - pub fn set_reserved_peerset_peers(&self, protocol: ProtocolName, peers: HashSet) { + pub fn set_reserved_peerset_peers(&self, protocol: Cow<'static, str>, peers: HashSet) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.set_reserved_peers(sc_peerset::SetId::from(index), peers); + self.peerset_handle + .set_reserved_peers(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peers); } else { error!( target: "sub-libp2p", @@ -1036,9 +1097,12 @@ where } /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { + pub fn remove_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_reserved_peer(sc_peerset::SetId::from(index), peer); + self.peerset_handle.remove_reserved_peer( + sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), + peer, + ); } else { error!( target: "sub-libp2p", @@ -1049,9 +1113,10 @@ where } /// Adds a `PeerId` to the list of reserved peers. 
- pub fn add_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { + pub fn add_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.add_reserved_peer(sc_peerset::SetId::from(index), peer); + self.peerset_handle + .add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); } else { error!( target: "sub-libp2p", @@ -1071,9 +1136,10 @@ where } /// Add a peer to a peers set. - pub fn add_to_peers_set(&self, protocol: ProtocolName, peer: PeerId) { + pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.add_to_peers_set(sc_peerset::SetId::from(index), peer); + self.peerset_handle + .add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); } else { error!( target: "sub-libp2p", @@ -1084,9 +1150,12 @@ where } /// Remove a peer from a peers set. - pub fn remove_from_peers_set(&self, protocol: ProtocolName, peer: PeerId) { + pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_from_peers_set(sc_peerset::SetId::from(index), peer); + self.peerset_handle.remove_from_peers_set( + sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), + peer, + ); } else { error!( target: "sub-libp2p", @@ -1184,31 +1253,31 @@ fn prepare_warp_sync_request( #[must_use] pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), + JustificationImport(Origin, B::Hash, NumberFor, Justifications), /// Notification protocols have been opened with a remote. 
NotificationStreamOpened { remote: PeerId, - protocol: ProtocolName, + protocol: Cow<'static, str>, /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. - negotiated_fallback: Option, + negotiated_fallback: Option>, roles: Roles, notifications_sink: NotificationsSink, }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { remote: PeerId, - protocol: ProtocolName, + protocol: Cow<'static, str>, notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. NotificationStreamClosed { remote: PeerId, - protocol: ProtocolName, + protocol: Cow<'static, str>, }, /// Messages have been received on one or more notifications protocols. NotificationsReceived { remote: PeerId, - messages: Vec<(ProtocolName, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, /// A new block request must be emitted. BlockRequest { @@ -1240,7 +1309,13 @@ pub enum CustomMessageOutcome { impl NetworkBehaviour for Protocol where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { type ConnectionHandler = ::ConnectionHandler; type OutEvent = CustomMessageOutcome; @@ -1432,7 +1507,7 @@ where for (id, request) in self .chain_sync .block_requests() - .map(|(peer_id, request)| (peer_id, request)) + .map(|(peer_id, request)| (*peer_id, request)) .collect::>() { let event = @@ -1453,11 +1528,8 @@ where self.pending_messages.push_back(event); } - // Advance the state of `ChainSync` - // - // Process any received requests received from `NetworkService` and - // check if there is any block announcement validation finished. - while let Poll::Ready(result) = self.chain_sync.poll(cx) { + // Check if there is any block announcement validation finished. 
+ while let Poll::Ready(result) = self.chain_sync.poll_block_announce_validation(cx) { match self.process_block_announce_validation_result(result) { CustomMessageOutcome::None => {}, outcome => self.pending_messages.push_back(outcome), @@ -1495,6 +1567,8 @@ where } => { // Set number 0 is hardcoded the default set of peers we sync from. if set_id == HARDCODED_PEERSETS_SYNC { + debug_assert!(negotiated_fallback.is_none()); + // `received_handshake` can be either a `Status` message if received from the // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block // announces substream. @@ -1551,12 +1625,14 @@ where } } else { match ( - Roles::decode_all(&mut &received_handshake[..]), + message::Roles::decode_all(&mut &received_handshake[..]), self.peers.get(&peer_id), ) { (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), negotiated_fallback, roles, notifications_sink, @@ -1568,7 +1644,9 @@ where // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), negotiated_fallback, roles: peer.info.roles, notifications_sink, @@ -1592,7 +1670,9 @@ where } else { CustomMessageOutcome::NotificationStreamReplaced { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), notifications_sink, } }, @@ -1617,7 +1697,9 @@ where } else { CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), + protocol: 
self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), } } }, @@ -1650,7 +1732,9 @@ where _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => CustomMessageOutcome::None, _ => { - let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); + let protocol_name = self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(); CustomMessageOutcome::NotificationsReceived { remote: peer_id, messages: vec![(protocol_name, message.freeze())], diff --git a/client/network/common/src/protocol/event.rs b/client/network/src/protocol/event.rs similarity index 76% rename from client/network/common/src/protocol/event.rs rename to client/network/src/protocol/event.rs index 236913df1b120..26c9544960605 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -19,10 +19,9 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. -use super::ProtocolName; -use crate::protocol::role::ObservedRole; use bytes::Bytes; use libp2p::{core::PeerId, kad::record::Key}; +use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -68,15 +67,15 @@ pub enum Event { remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. /// This is always equal to the value of - /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the + /// [`crate::config::NonDefaultSetConfig::notifications_protocol`] of one of the /// configured sets. - protocol: ProtocolName, + protocol: Cow<'static, str>, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. 
/// Always contains a value equal to the value in - /// `sc_network::config::NonDefaultSetConfig::fallback_names`. - negotiated_fallback: Option, + /// [`crate::config::NonDefaultSetConfig::fallback_names`]. + negotiated_fallback: Option>, /// Role of the remote. role: ObservedRole, }, @@ -87,7 +86,7 @@ pub enum Event { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -95,6 +94,29 @@ pub enum Event { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ProtocolName, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, } + +/// Role that the peer sent to us during the handshake, with the addition of what our local node +/// knows about that peer. +/// +/// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a +/// > node says about itself, while `ObservedRole` is a `Role` merged with the +/// > information known locally about that node. +#[derive(Debug, Clone)] +pub enum ObservedRole { + /// Full node. + Full, + /// Light node. + Light, + /// Third-party authority. + Authority, +} + +impl ObservedRole { + /// Returns `true` for `ObservedRole::Light`. 
+ pub fn is_light(&self) -> bool { + matches!(self, Self::Light) + } +} diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ef652387d2c7d..50c4a264a5f95 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -21,7 +21,7 @@ pub use self::generic::{ RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse, RemoteHeaderRequest, - RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, + RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, Roles, }; use codec::{Decode, Encode}; use sc_client_api::StorageProof; @@ -36,6 +36,9 @@ pub type Message = generic::Message< ::Extrinsic, >; +/// A set of transactions. +pub type Transactions = Vec; + /// Remote call response. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct RemoteCallResponse { @@ -56,12 +59,12 @@ pub struct RemoteReadResponse { /// Generic types. pub mod generic { - use super::{RemoteCallResponse, RemoteReadResponse}; - use codec::{Decode, Encode, Input}; + use super::{RemoteCallResponse, RemoteReadResponse, Transactions}; + use bitflags::bitflags; + use codec::{Decode, Encode, Input, Output}; use sc_client_api::StorageProof; use sc_network_common::{ message::RequestId, - protocol::role::Roles, sync::message::{ generic::{BlockRequest, BlockResponse}, BlockAnnounce, @@ -69,6 +72,60 @@ pub mod generic { }; use sp_runtime::ConsensusEngineId; + bitflags! { + /// Bitmask of the roles that a node fulfills. + pub struct Roles: u8 { + /// No network. + const NONE = 0b00000000; + /// Full node, does not participate in consensus. + const FULL = 0b00000001; + /// Light client node. + const LIGHT = 0b00000010; + /// Act as an authority + const AUTHORITY = 0b00000100; + } + } + + impl Roles { + /// Does this role represents a client that holds full chain data locally? 
+ pub fn is_full(&self) -> bool { + self.intersects(Self::FULL | Self::AUTHORITY) + } + + /// Does this role represents a client that does not participates in the consensus? + pub fn is_authority(&self) -> bool { + *self == Self::AUTHORITY + } + + /// Does this role represents a client that does not hold full chain data locally? + pub fn is_light(&self) -> bool { + !self.is_full() + } + } + + impl<'a> From<&'a crate::config::Role> for Roles { + fn from(roles: &'a crate::config::Role) -> Self { + match roles { + crate::config::Role::Full => Self::FULL, + crate::config::Role::Authority { .. } => Self::AUTHORITY, + } + } + } + + impl codec::Encode for Roles { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } + } + + impl codec::EncodeLike for Roles {} + + impl codec::Decode for Roles { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + } + } + /// Consensus is mostly opaque to us #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ConsensusMessage { @@ -89,10 +146,9 @@ pub mod generic { BlockResponse(BlockResponse), /// Block announce. BlockAnnounce(BlockAnnounce

), + /// Transactions. + Transactions(Transactions), /// Consensus protocol message. - // NOTE: index is incremented by 1 due to transaction-related - // message that was removed - #[codec(index = 6)] Consensus(ConsensusMessage), /// Remote method call request. RemoteCallRequest(RemoteCallRequest), diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 04f6fe445ac63..1f872ec857e79 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -33,14 +33,15 @@ use libp2p::{ use log::{error, trace, warn}; use parking_lot::RwLock; use rand::distributions::{Distribution as _, Uniform}; -use sc_network_common::protocol::ProtocolName; use sc_peerset::DropReason; use smallvec::SmallVec; use std::{ + borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}, mem, pin::Pin, + str, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, @@ -139,9 +140,9 @@ pub struct Notifications { #[derive(Debug, Clone)] pub struct ProtocolConfig { /// Name of the protocol. - pub name: ProtocolName, + pub name: Cow<'static, str>, /// Names of the protocol to use if the main one isn't available. - pub fallback_names: Vec, + pub fallback_names: Vec>, /// Handshake of the protocol. pub handshake: Vec, /// Maximum allowed size for a notification. @@ -308,7 +309,7 @@ pub enum NotificationsOut { set_id: sc_peerset::SetId, /// If `Some`, a fallback protocol name has been used rather the main protocol name. /// Always matches one of the fallback names passed at initialization. - negotiated_fallback: Option, + negotiated_fallback: Option>, /// Handshake that was sent to us. /// This is normally a "Status" message, but this is out of the concern of this code. 
received_handshake: Vec, diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs index ea09cb76edce1..c1602319d0ac4 100644 --- a/client/network/src/protocol/notifications/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -80,11 +80,12 @@ use libp2p::{ }; use log::error; use parking_lot::{Mutex, RwLock}; -use sc_network_common::protocol::ProtocolName; use std::{ + borrow::Cow, collections::VecDeque, mem, pin::Pin, + str, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, @@ -145,9 +146,9 @@ pub struct NotifsHandler { #[derive(Debug, Clone)] pub struct ProtocolConfig { /// Name of the protocol. - pub name: ProtocolName, + pub name: Cow<'static, str>, /// Names of the protocol to use if the main one isn't available. - pub fallback_names: Vec, + pub fallback_names: Vec>, /// Handshake of the protocol. The `RwLock` is locked every time a new substream is opened. pub handshake: Arc>>, /// Maximum allowed size for a notification. @@ -296,7 +297,7 @@ pub enum NotifsHandlerOut { /// Index of the protocol in the list of protocols passed at initialization. protocol_index: usize, /// Name of the protocol that was actually negotiated, if the default one wasn't available. - negotiated_fallback: Option, + negotiated_fallback: Option>, /// The endpoint of the connection that is open for custom protocols. endpoint: ConnectedPoint, /// Handshake that was sent to us. 
diff --git a/client/network/src/protocol/notifications/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs index 56cfefd75d53d..3fbb59d399a0e 100644 --- a/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -39,8 +39,8 @@ use bytes::BytesMut; use futures::prelude::*; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::{error, warn}; -use sc_network_common::protocol::ProtocolName; use std::{ + borrow::Cow, convert::Infallible, io, mem, pin::Pin, @@ -58,7 +58,7 @@ const MAX_HANDSHAKE_SIZE: usize = 1024; pub struct NotificationsIn { /// Protocol name to use when negotiating the substream. /// The first one is the main name, while the other ones are fall backs. - protocol_names: Vec, + protocol_names: Vec>, /// Maximum allowed size for a single notification. max_notification_size: u64, } @@ -69,7 +69,7 @@ pub struct NotificationsIn { pub struct NotificationsOut { /// Protocol name to use when negotiating the substream. /// The first one is the main name, while the other ones are fall backs. - protocol_names: Vec, + protocol_names: Vec>, /// Message to send when we start the handshake. initial_message: Vec, /// Maximum allowed size for a single notification. @@ -114,8 +114,8 @@ pub struct NotificationsOutSubstream { impl NotificationsIn { /// Builds a new potential upgrade. 
pub fn new( - main_protocol_name: impl Into, - fallback_names: Vec, + main_protocol_name: impl Into>, + fallback_names: Vec>, max_notification_size: u64, ) -> Self { let mut protocol_names = fallback_names; @@ -126,11 +126,16 @@ impl NotificationsIn { } impl UpgradeInfo for NotificationsIn { - type Info = ProtocolName; + type Info = StringProtocolName; type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.protocol_names.clone().into_iter() + self.protocol_names + .iter() + .cloned() + .map(StringProtocolName) + .collect::>() + .into_iter() } } @@ -167,10 +172,10 @@ where Ok(NotificationsInOpen { handshake, - negotiated_fallback: if negotiated_name == self.protocol_names[0] { + negotiated_fallback: if negotiated_name.0 == self.protocol_names[0] { None } else { - Some(negotiated_name) + Some(negotiated_name.0) }, substream, }) @@ -184,7 +189,7 @@ pub struct NotificationsInOpen { pub handshake: Vec, /// If the negotiated name is not the "main" protocol name but a fallback, contains the /// name of the negotiated fallback. - pub negotiated_fallback: Option, + pub negotiated_fallback: Option>, /// Implementation of `Stream` that allows receives messages from the substream. pub substream: NotificationsInSubstream, } @@ -329,8 +334,8 @@ where impl NotificationsOut { /// Builds a new potential upgrade. pub fn new( - main_protocol_name: impl Into, - fallback_names: Vec, + main_protocol_name: impl Into>, + fallback_names: Vec>, initial_message: impl Into>, max_notification_size: u64, ) -> Self { @@ -346,12 +351,27 @@ impl NotificationsOut { } } +/// Implementation of the `ProtocolName` trait, where the protocol name is a string. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StringProtocolName(Cow<'static, str>); + +impl upgrade::ProtocolName for StringProtocolName { + fn protocol_name(&self) -> &[u8] { + self.0.as_bytes() + } +} + impl UpgradeInfo for NotificationsOut { - type Info = ProtocolName; + type Info = StringProtocolName; type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.protocol_names.clone().into_iter() + self.protocol_names + .iter() + .cloned() + .map(StringProtocolName) + .collect::>() + .into_iter() } } @@ -386,10 +406,10 @@ where Ok(NotificationsOutOpen { handshake, - negotiated_fallback: if negotiated_name == self.protocol_names[0] { + negotiated_fallback: if negotiated_name.0 == self.protocol_names[0] { None } else { - Some(negotiated_name) + Some(negotiated_name.0) }, substream: NotificationsOutSubstream { socket: Framed::new(socket, codec) }, }) @@ -403,7 +423,7 @@ pub struct NotificationsOutOpen { pub handshake: Vec, /// If the negotiated name is not the "main" protocol name but a fallback, contains the /// name of the negotiated fallback. - pub negotiated_fallback: Option, + pub negotiated_fallback: Option>, /// Implementation of `Sink` that allows sending messages on the substream. pub substream: NotificationsOutSubstream, } @@ -485,10 +505,11 @@ mod tests { use async_std::net::{TcpListener, TcpStream}; use futures::{channel::oneshot, prelude::*}; use libp2p::core::upgrade; + use std::borrow::Cow; #[test] fn basic_works() { - const PROTO_NAME: &str = "/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -531,7 +552,7 @@ mod tests { fn empty_handshake() { // Check that everything still works when the handshake messages are empty. 
- const PROTO_NAME: &str = "/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -572,7 +593,7 @@ mod tests { #[test] fn refused() { - const PROTO_NAME: &str = "/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -613,7 +634,7 @@ mod tests { #[test] fn large_initial_message_refused() { - const PROTO_NAME: &str = "/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -651,7 +672,7 @@ mod tests { #[test] fn large_handshake_refused() { - const PROTO_NAME: &str = "/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index d49cbd8051341..cec4aa2a07fba 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -50,13 +50,9 @@ use libp2p::{ NetworkBehaviourAction, PollParameters, }, }; -use sc_network_common::{ - protocol::ProtocolName, - request_responses::{ - IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure, - }, -}; +use sc_network_common::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; use std::{ + borrow::Cow, collections::{hash_map::Entry, HashMap}, io, iter, pin::Pin, @@ -77,7 +73,7 @@ pub enum Event { /// Peer which has emitted the request. peer: PeerId, /// Name of the protocol in question. 
- protocol: ProtocolName, + protocol: Cow<'static, str>, /// Whether handling the request was successful or unsuccessful. /// /// When successful contains the time elapsed between when we received the request and when @@ -93,7 +89,7 @@ pub enum Event { /// Peer that we send a request to. peer: PeerId, /// Name of the protocol in question. - protocol: ProtocolName, + protocol: Cow<'static, str>, /// Duration the request took. duration: Duration, /// Result of the request. @@ -112,23 +108,43 @@ pub enum Event { /// [`ProtocolRequestId`]s. #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct ProtocolRequestId { - protocol: ProtocolName, + protocol: Cow<'static, str>, request_id: RequestId, } -impl From<(ProtocolName, RequestId)> for ProtocolRequestId { - fn from((protocol, request_id): (ProtocolName, RequestId)) -> Self { +impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { + fn from((protocol, request_id): (Cow<'static, str>, RequestId)) -> Self { Self { protocol, request_id } } } +/// When sending a request, what to do on a disconnected recipient. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum IfDisconnected { + /// Try to connect to the peer. + TryConnect, + /// Just fail if the destination is not yet connected. + ImmediateError, +} + +/// Convenience functions for `IfDisconnected`. +impl IfDisconnected { + /// Shall we connect to a disconnected peer? + pub fn should_connect(self) -> bool { + match self { + Self::TryConnect => true, + Self::ImmediateError => false, + } + } +} + /// Implementation of `NetworkBehaviour` that provides support for request-response protocols. pub struct RequestResponsesBehaviour { /// The multiple sub-protocols, by name. /// Contains the underlying libp2p `RequestResponse` behaviour, plus an optional /// "response builder" used to build responses for incoming requests. 
protocols: HashMap< - ProtocolName, + Cow<'static, str>, (RequestResponse, Option>), >, @@ -164,7 +180,7 @@ struct MessageRequest { request_id: RequestId, request: Vec, channel: ResponseChannel, ()>>, - protocol: ProtocolName, + protocol: String, resp_builder: Option>, // Once we get incoming request we save all params, create an async call to Peerset // to get the reputation of the peer. @@ -175,7 +191,7 @@ struct MessageRequest { struct RequestProcessingOutcome { peer: PeerId, request_id: RequestId, - protocol: ProtocolName, + protocol: Cow<'static, str>, inner_channel: ResponseChannel, ()>>, response: OutgoingResponse, } @@ -204,9 +220,7 @@ impl RequestResponsesBehaviour { max_request_size: protocol.max_request_size, max_response_size: protocol.max_response_size, }, - iter::once(protocol.name.as_bytes().to_vec()) - .chain(protocol.fallback_names.iter().map(|name| name.as_bytes().to_vec())) - .zip(iter::repeat(protocol_support)), + iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), cfg, ); @@ -497,6 +511,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { debug_assert!(false, "Received message on outbound-only protocol."); } + let protocol = Cow::from(protocol); self.pending_responses.push(Box::pin(async move { // The `tx` created above can be dropped if we are not capable of // processing this request, which is reflected as a @@ -619,7 +634,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, request, channel, - protocol: protocol.clone(), + protocol: protocol.to_string(), resp_builder: resp_builder.clone(), get_peer_reputation, }); @@ -767,7 +782,24 @@ impl NetworkBehaviour for RequestResponsesBehaviour { pub enum RegisterError { /// A protocol has been specified multiple times. #[error("{0}")] - DuplicateProtocol(ProtocolName), + DuplicateProtocol(Cow<'static, str>), +} + +/// Error in a request. 
+#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RequestFailure { + #[error("We are not currently connected to the requested peer.")] + NotConnected, + #[error("Given protocol hasn't been registered.")] + UnknownProtocol, + #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] + Refused, + #[error("The remote replied, but the local node is no longer interested in the response.")] + Obsolete, + /// Problem on the network. + #[error("Problem on the network: {0}")] + Network(OutboundFailure), } /// Error when processing a request sent by a remote. @@ -995,7 +1027,6 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), - fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1096,7 +1127,6 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), - fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 8, // <-- important for the test request_timeout: Duration::from_secs(30), @@ -1193,7 +1223,6 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), - fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1201,7 +1230,6 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), - fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1219,7 +1247,6 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), - fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1227,7 +1254,6 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), - fallback_names: Vec::new(), max_request_size: 1024, 
max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), diff --git a/client/network/bitswap/src/schema.rs b/client/network/src/schema.rs similarity index 97% rename from client/network/bitswap/src/schema.rs rename to client/network/src/schema.rs index 362e59aca68f9..4893bc28a7355 100644 --- a/client/network/bitswap/src/schema.rs +++ b/client/network/src/schema.rs @@ -18,6 +18,6 @@ //! Include sources generated from protobuf definitions. -pub(crate) mod bitswap { +pub mod bitswap { include!(concat!(env!("OUT_DIR"), "/bitswap.message.rs")); } diff --git a/client/network/bitswap/src/schema/bitswap.v1.2.0.proto b/client/network/src/schema/bitswap.v1.2.0.proto similarity index 100% rename from client/network/bitswap/src/schema/bitswap.v1.2.0.proto rename to client/network/src/schema/bitswap.v1.2.0.proto diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 5ffd36007f530..409ed88c75c00 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -29,21 +29,24 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, - config::Params, + bitswap::Bitswap, + config::{parse_str_addr, Params, TransportConfig}, discovery::DiscoveryConfig, + error::Error, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - protocol::{self, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready}, - transport, ChainSyncInterface, ReputationChange, + protocol::{ + self, event::Event, message::generic::Roles, NotificationsSink, NotifsHandlerError, + PeerInfo, Protocol, Ready, + }, + transactions, transport, DhtEvent, ExHashT, NetworkStateInfo, NetworkStatus, ReputationChange, }; -use codec::Encode; +use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, - identify::Info as IdentifyInfo, - kad::record::Key as KademliaKey, multiaddr, ping::Failure as PingFailure, swarm::{ @@ 
-55,29 +58,15 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; -use sc_network_common::{ - config::{MultiaddrWithPeerId, TransportConfig}, - error::Error, - protocol::{ - event::{DhtEvent, Event}, - ProtocolName, - }, - request_responses::{IfDisconnected, RequestFailure}, - service::{ - NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner, - NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, - NotificationSender as NotificationSenderT, NotificationSenderError, - NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, - }, - sync::SyncStatus, - ExHashT, -}; +use sc_network_common::sync::{SyncState, SyncStatus}; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::HeaderBackend; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ + borrow::Cow, cmp, collections::{HashMap, HashSet}, fs, iter, @@ -92,15 +81,24 @@ use std::{ task::Poll, }; -pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; +pub use behaviour::{ + IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, ResponseFailure, +}; mod metrics; mod out_events; +mod signature; #[cfg(test)] mod tests; -pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; -use sc_network_common::service::{NetworkBlock, NetworkRequest}; +pub use libp2p::{ + identity::{ + error::{DecodingError, SigningError}, + Keypair, PublicKey, + }, + kad::record::Key as KademliaKey, +}; +pub use signature::Signature; /// Substrate network service. 
Handles network IO and manages connectivity. pub struct NetworkService { @@ -120,12 +118,10 @@ pub struct NetworkService { /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. - to_worker: TracingUnboundedSender>, - /// Interface that can be used to delegate calls to `ChainSync` - chain_sync_service: Box>, + to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notifications_sizes_metric: Option, @@ -138,29 +134,41 @@ impl NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Creates the network service. /// /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(mut params: Params) -> Result { + pub fn new(mut params: Params) -> Result { // Private and public keys configuration. 
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); let local_peer_id = local_public.to_peer_id(); - params - .network_config - .request_response_protocols - .extend(params.request_response_protocol_configs); - params.network_config.boot_nodes = params .network_config .boot_nodes .into_iter() - .filter(|boot_node| boot_node.peer_id != local_peer_id) + .filter(|boot_node| { + if boot_node.peer_id == local_peer_id { + warn!( + target: "sub-libp2p", + "Local peer ID used in bootnode, ignoring: {}", + boot_node, + ); + false + } else { + true + } + }) .collect(); params.network_config.default_peers_set.reserved_nodes = params .network_config @@ -216,19 +224,34 @@ where fs::create_dir_all(path)?; } + let transactions_handler_proto = + transactions::TransactionsHandlerPrototype::new(params.protocol_id.clone()); + params + .network_config + .extra_sets + .insert(0, transactions_handler_proto.set_config()); + info!( target: "sub-libp2p", "🏷 Local node identity is: {}", local_peer_id.to_base58(), ); + let default_notif_handshake_message = Roles::from(¶ms.role).encode(); + let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), params.chain.clone(), + params.protocol_id.clone(), ¶ms.network_config, + iter::once(Vec::new()) + .chain( + (0..params.network_config.extra_sets.len() - 1) + .map(|_| default_notif_handshake_message.clone()), + ) + .collect(), params.metrics_registry.as_ref(), params.chain_sync, - params.block_announce_config, )?; // List of multiaddresses that we know in the network. 
@@ -264,11 +287,6 @@ where let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); - let block_request_protocol_name = params.block_request_protocol_config.name.clone(); - let state_request_protocol_name = params.state_request_protocol_config.name.clone(); - let warp_sync_protocol_name = - params.warp_sync_protocol_config.as_ref().map(|c| c.name.clone()); - // Build the swarm. let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = format!( @@ -282,13 +300,7 @@ where config.discovery_limit( u64::from(params.network_config.default_peers_set.out_peers) + 15, ); - let genesis_hash = params - .chain - .hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"); - config.with_kademlia(genesis_hash, params.fork_id.as_deref(), ¶ms.protocol_id); + config.add_protocol(params.protocol_id.clone()); config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); config.use_kademlia_disjoint_query_paths( @@ -361,6 +373,7 @@ where }; let behaviour = { + let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(params.chain)); let result = Behaviour::new( protocol, user_agent, @@ -369,6 +382,7 @@ where params.block_request_protocol_config, params.state_request_protocol_config, params.warp_sync_protocol_config, + bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, peerset_handle.clone(), @@ -446,7 +460,6 @@ where local_peer_id, local_identity, to_worker, - chain_sync_service: params.chain_sync_service, peers_notifications_sinks: peers_notifications_sinks.clone(), notifications_sizes_metric: metrics .as_ref() @@ -454,6 +467,13 @@ where _marker: PhantomData, }); + let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( + service.clone(), + params.transaction_pool, + params.metrics_registry.as_ref(), + )?; + 
(params.transactions_handler_executor)(tx_handler.run().boxed()); + Ok(NetworkWorker { external_addresses, num_connected, @@ -464,12 +484,9 @@ where from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, + tx_handler_controller, metrics, boot_node_ids, - block_request_protocol_name, - state_request_protocol_name, - warp_sync_protocol_name, - _marker: Default::default(), }) } @@ -684,8 +701,9 @@ where self.service.remove_reserved_peer(peer); } - /// Adds a `PeerId` and its `Multiaddr` as reserved. - pub fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. + pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { self.service.add_reserved_peer(peer) } @@ -696,157 +714,301 @@ where } impl NetworkService { - /// Get network state. - /// - /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally - /// everywhere about this. Please don't use this function to retrieve actual information. + /// Returns the local `PeerId`. + pub fn local_peer_id(&self) -> &PeerId { + &self.local_peer_id + } + + /// Signs the message with the `KeyPair` that defined the local `PeerId`. + pub fn sign_with_local_identity( + &self, + msg: impl AsRef<[u8]>, + ) -> Result { + Signature::sign_message(msg.as_ref(), &self.local_identity) + } + + /// Set authorized peers. /// - /// Returns an error if the `NetworkWorker` is no longer running. - pub async fn network_state(&self) -> Result { - let (tx, rx) = oneshot::channel(); + /// Need a better solution to manage authorized peers, but now just use reserved peers for + /// prototyping. + pub fn set_authorized_peers(&self, peers: HashSet) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + } + /// Set authorized_only flag. 
+ /// + /// Need a better solution to decide authorized_only, but now just use reserved_only flag for + /// prototyping. + pub fn set_authorized_only(&self, reserved_only: bool) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::NetworkState { pending_response: tx }); + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + } - match rx.await { - Ok(v) => v.map_err(|_| ()), - // The channel can only be closed if the network worker no longer exists. - Err(_) => Err(()), - } + /// Adds an address known to a node. + pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); } - /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. + /// Has no effect if the notifications channel with this protocol name is not open. /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn split_multiaddr_and_peer_id( + /// If the buffer of pending outgoing notifications with that peer is full, the notification + /// is silently dropped and the connection to the remote will start being shut down. This + /// happens if you call this method at a higher rate than the rate at which the peer processes + /// these notifications, or if the available network bandwidth is too low. + /// + /// For this reason, this method is considered soft-deprecated. You are encouraged to use + /// [`NetworkService::notification_sender`] instead. + /// + /// > **Note**: The reason why this is a no-op in the situation where we have no channel is + /// > that we don't guarantee message delivery anyway. 
Networking issues can cause + /// > connections to drop at any time, and higher-level logic shouldn't differentiate + /// > between the remote voluntarily closing a substream or a network error + /// > preventing the message from being delivered. + /// + /// The protocol must have been registered with + /// `crate::config::NetworkConfiguration::notifications_protocols`. + pub fn write_notification( &self, - peers: HashSet, - ) -> Result, String> { - peers - .into_iter() - .map(|mut addr| { - let peer = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| "Invalid PeerId format".to_string())?, - _ => return Err("Missing PeerId from address".to_string()), - }; - - // Make sure the local peer ID is never added to the PSM - // or added as a "known address", even if given. - if peer == self.local_peer_id { - Err("Local peer ID in peer set.".to_string()) - } else { - Ok((peer, addr)) - } - }) - .collect::, String>>() - } -} + target: PeerId, + protocol: Cow<'static, str>, + message: Vec, + ) { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + sink.clone() + } else { + // Notification silently discarded, as documented. 
+ debug!( + target: "sub-libp2p", + "Attempted to send notification on missing or closed substream: {}, {:?}", + target, protocol, + ); + return + } + }; -impl sp_consensus::SyncOracle for NetworkService { - fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) - } + if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { + notifications_sizes_metric + .with_label_values(&["out", &protocol]) + .observe(message.len() as f64); + } - fn is_offline(&self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 + // Sending is communicated to the `NotificationsSink`. + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + target, protocol, message.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + sink.send_sync_notification(message); } -} -impl sc_consensus::JustificationSyncLink for NetworkService { - /// Request a justification for the given block from the network. + /// Obtains a [`NotificationSender`] for a connected peer, if it exists. /// - /// On success, the justification will be passed to the import queue that was part at - /// initialization as part of the configuration. - fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); - } + /// A `NotificationSender` is scoped to a particular connection to the peer that holds + /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two + /// steps: + /// + /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready + /// for another notification, yielding a [`NotificationSenderReady`] token. + /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation + /// can only fail if the underlying notification substream or connection has suddenly closed. 
+ /// + /// An error is returned by [`NotificationSenderReady::send`] if there exists no open + /// notifications substream with that combination of peer and protocol, or if the remote + /// has asked to close the notifications substream. If that happens, it is guaranteed that an + /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by + /// [`NetworkService::event_stream`]. + /// + /// If the remote requests to close the notifications substream, all notifications successfully + /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the + /// substream actually gets closed, but attempting to enqueue more notifications will now + /// return an error. It is however possible for the entire connection to be abruptly closed, + /// in which case enqueued notifications will be lost. + /// + /// The protocol must have been registered with + /// `crate::config::NetworkConfiguration::notifications_protocols`. + /// + /// # Usage + /// + /// This method returns a struct that allows waiting until there is space available in the + /// buffer of messages towards the given peer. If the peer processes notifications at a slower + /// rate than we send them, this buffer will quickly fill up. + /// + /// As such, you should never do something like this: + /// + /// ```ignore + /// // Do NOT do this + /// for peer in peers { + /// if let Ok(n) = network.notification_sender(peer, ...) { + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } + /// } + /// } + /// ``` + /// + /// Doing so would slow down all peers to the rate of the slowest one. A malicious or + /// malfunctioning peer could intentionally process notifications at a very slow rate. + /// + /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one + /// maintained by `sc-network`, and use `notification_sender` to progressively send out + /// elements from your buffer. 
If this additional buffer is full (which will happen at some + /// point if the peer is too slow to process notifications), appropriate measures can be taken, + /// such as removing non-critical notifications from the buffer or disconnecting the peer + /// using [`NetworkService::disconnect_peer`]. + /// + /// + /// Notifications Per-peer buffer + /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet + /// ^ (not covered by + /// | sc-network) + /// + + /// Notifications should be dropped + /// if buffer is full + /// + /// + /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. + pub fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + sink.clone() + } else { + return Err(NotificationSenderError::Closed) + } + }; - fn clear_justification_requests(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); - } -} + let notification_size_metric = self + .notifications_sizes_metric + .as_ref() + .map(|histogram| histogram.with_label_values(&["out", &protocol])); -impl NetworkStateInfo for NetworkService -where - B: sp_runtime::traits::Block, - H: ExHashT, -{ - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec { - self.external_addresses.lock().clone() + Ok(NotificationSender { sink, protocol_name: protocol, notification_size_metric }) } - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId { - self.local_peer_id + /// Returns a stream containing the events that happen on the network. + /// + /// If this method is called multiple times, the events are duplicated. 
+ /// + /// The stream never ends (unless the `NetworkWorker` gets shut down). + /// + /// The name passed is used to identify the channel in the Prometheus metrics. Note that the + /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having + /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory + pub fn event_stream(&self, name: &'static str) -> impl Stream { + let (tx, rx) = out_events::channel(name); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); + rx } -} -impl NetworkSigner for NetworkService -where - B: sp_runtime::traits::Block, - H: ExHashT, -{ - fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { - Signature::sign_message(msg.as_ref(), &self.local_identity) + /// Sends a single targeted request to a specific peer. On success, returns the response of + /// the peer. + /// + /// Request-response protocols are a way to complement notifications protocols, but + /// notifications should remain the default ways of communicating information. For example, a + /// peer can announce something through a notification, after which the recipient can obtain + /// more information by performing a request. + /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way + /// you will get an error immediately for disconnected peers, instead of waiting for a + /// potentially very long connection attempt, which would suggest that something is wrong + /// anyway, as you are supposed to be connected because of the notification protocol. + /// + /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. + /// Such restrictions, if desired, need to be enforced at the call site(s). + /// + /// The protocol must have been registered through + /// [`NetworkConfiguration::request_response_protocols`]( + /// crate::config::NetworkConfiguration::request_response_protocols). 
+ pub async fn request( + &self, + target: PeerId, + protocol: impl Into>, + request: Vec, + connect: IfDisconnected, + ) -> Result, RequestFailure> { + let (tx, rx) = oneshot::channel(); + + self.start_request(target, protocol, request, tx, connect); + + match rx.await { + Ok(v) => v, + // The channel can only be closed if the network worker no longer exists. If the + // network worker no longer exists, then all connections to `target` are necessarily + // closed, and we legitimately report this situation as a "ConnectionClosed". + Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), + } } -} -impl NetworkDHTProvider for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - /// Start getting a value from the DHT. + /// Variation of `request` which starts a request whose response is delivered on a provided + /// channel. /// - /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an - /// item on the [`NetworkWorker`] stream. - fn get_value(&self, key: &KademliaKey) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); + /// Instead of blocking and waiting for a reply, this function returns immediately, sending + /// responses via the passed in sender. This alternative API exists to make it easier to + /// integrate with message passing APIs. + /// + /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a + /// closing connection. This is expected behaviour. With `request` you would get a + /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. + pub fn start_request( + &self, + target: PeerId, + protocol: impl Into>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx, + connect, + }); } - /// Start putting a value in the DHT. 
+ /// High-level network status information. /// - /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an - /// item on the [`NetworkWorker`] stream. - fn put_value(&self, key: KademliaKey, value: Vec) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); - } -} + /// Returns an error if the `NetworkWorker` is no longer running. + pub async fn status(&self) -> Result, ()> { + let (tx, rx) = oneshot::channel(); -impl NetworkSyncForkRequest> for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - self.chain_sync_service.set_sync_fork_request(peers, hash, number); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); + + match rx.await { + Ok(v) => v.map_err(|_| ()), + // The channel can only be closed if the network worker no longer exists. + Err(_) => Err(()), + } } -} -#[async_trait::async_trait] -impl NetworkStatusProvider for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - async fn status(&self) -> Result, ()> { + /// Get network state. + /// + /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally + /// everywhere about this. Please don't use this function to retrieve actual information. + /// + /// Returns an error if the `NetworkWorker` is no longer running. 
+ pub async fn network_state(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); + .unbounded_send(ServiceToWorkerMsg::NetworkState { pending_response: tx }); match rx.await { Ok(v) => v.map_err(|_| ()), @@ -854,65 +1016,140 @@ where Err(_) => Err(()), } } -} -impl NetworkPeers for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn set_authorized_peers(&self, peers: HashSet) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + /// You may call this when new transactions are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn trigger_repropagate(&self) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); + } + + /// You must call when new transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_transaction(&self, hash: H) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); + } + + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. This function forces such an announcement. + pub fn announce_block(&self, hash: B::Hash, data: Option>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + } + + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. 
+ pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.peerset.report_peer(who, cost_benefit); } - fn set_authorized_only(&self, reserved_only: bool) { + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. + /// + /// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also + /// prevents the local node from re-establishing an outgoing substream to this peer until it + /// is added again. + pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); } - fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + /// Request a justification for the given block from the network. + /// + /// On success, the justification will be passed to the import queue that was part at + /// initialization as part of the configuration. + pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.peerset.report_peer(who, cost_benefit); + /// Clear all pending justification requests. + pub fn clear_justification_requests(&self) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); + } + + /// Are we in the process of downloading the chain? + pub fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) + } + + /// Start getting a value from the DHT. + /// + /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an + /// item on the [`NetworkWorker`] stream. 
+ pub fn get_value(&self, key: &KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); } - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol)); + /// Start putting a value in the DHT. + /// + /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an + /// item on the [`NetworkWorker`] stream. + pub fn put_value(&self, key: KademliaKey, value: Vec) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } - fn accept_unreserved_peers(&self) { + /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. + pub fn accept_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } - fn deny_unreserved_peers(&self) { + /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing + /// purposes. + pub fn deny_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } - fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. + /// + /// Returns an `Err` if the given string is not a valid multiaddress + /// or contains an invalid peer ID (which includes the local peer ID). + pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; // Make sure the local peer ID is never added to the PSM. 
- if peer.peer_id == self.local_peer_id { + if peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer.peer_id, peer.multiaddr)); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer.peer_id)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); Ok(()) } - fn remove_reserved_peer(&self, peer_id: PeerId) { + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer_id: PeerId) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - fn set_reserved_peers( + /// Sets the reserved set of a protocol to the given set of peers. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// The node will start establishing/accepting connections and substreams to/from peers in this + /// set, if it doesn't have any substream open with them yet. + /// + /// Note however, if a call to this function results in less peers on the reserved set, they + /// will not necessarily get disconnected (depending on available free slots in the peer set). + /// If you want to also disconnect those removed peers, you will have to call + /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit + /// this step if the peer set is in reserved only mode. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). 
+ pub fn set_reserved_peers( &self, - protocol: ProtocolName, + protocol: Cow<'static, str>, peers: HashSet, ) -> Result<(), String> { let peers_addrs = self.split_multiaddr_and_peer_id(peers)?; @@ -941,9 +1178,16 @@ where Ok(()) } - fn add_peers_to_reserved_set( + /// Add peers to a peer set. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + pub fn add_peers_to_reserved_set( &self, - protocol: ProtocolName, + protocol: Cow<'static, str>, peers: HashSet, ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; @@ -967,7 +1211,8 @@ where Ok(()) } - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec) { + /// Remove peers from a peer set. + pub fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -975,9 +1220,28 @@ where } } - fn add_to_peers_set( + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. + pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } + + /// Add a peer to a set of peers. + /// + /// If the set has slots available, it will try to open a substream with this peer. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. 
+ /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + pub fn add_to_peers_set( &self, - protocol: ProtocolName, + protocol: Cow<'static, str>, peers: HashSet, ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; @@ -1001,7 +1265,10 @@ where Ok(()) } - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec) { + /// Remove peers from a peer set. + /// + /// If we currently have an open substream with this peer, it will soon be closed. + pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1009,144 +1276,90 @@ where } } - fn sync_num_connected(&self) -> usize { + /// Returns the number of peers we're connected to. + pub fn num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } -} -impl NetworkEventStream for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn event_stream(&self, name: &'static str) -> Pin + Send>> { - let (tx, rx) = out_events::channel(name); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); - Box::pin(rx) + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); } -} - -impl NetworkNotification for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec) { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. 
- let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - // Notification silently discarded, as documented. - debug!( - target: "sub-libp2p", - "Attempted to send notification on missing or closed substream: {}, {:?}", - target, protocol, - ); - return - } - }; - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { - notifications_sizes_metric - .with_label_values(&["out", &protocol]) - .observe(message.len() as f64); - } + /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn split_multiaddr_and_peer_id( + &self, + peers: HashSet, + ) -> Result, String> { + peers + .into_iter() + .map(|mut addr| { + let peer = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| "Invalid PeerId format".to_string())?, + _ => return Err("Missing PeerId from address".to_string()), + }; - // Sending is communicated to the `NotificationsSink`. - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, protocol, message.len() - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - sink.send_sync_notification(message); + // Make sure the local peer ID is never added to the PSM + // or added as a "known address", even if given. 
+ if peer == self.local_peer_id { + Err("Local peer ID in peer set.".to_string()) + } else { + Ok((peer, addr)) + } + }) + .collect::, String>>() } +} - fn notification_sender( - &self, - target: PeerId, - protocol: ProtocolName, - ) -> Result, NotificationSenderError> { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - return Err(NotificationSenderError::Closed) - } - }; - - let notification_size_metric = self - .notifications_sizes_metric - .as_ref() - .map(|histogram| histogram.with_label_values(&["out", &protocol])); +impl sp_consensus::SyncOracle for NetworkService { + fn is_major_syncing(&mut self) -> bool { + Self::is_major_syncing(self) + } - Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric })) + fn is_offline(&mut self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 } } -#[async_trait::async_trait] -impl NetworkRequest for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - async fn request( - &self, - target: PeerId, - protocol: ProtocolName, - request: Vec, - connect: IfDisconnected, - ) -> Result, RequestFailure> { - let (tx, rx) = oneshot::channel(); +impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService { + fn is_major_syncing(&mut self) -> bool { + NetworkService::is_major_syncing(self) + } - self.start_request(target, protocol, request, tx, connect); + fn is_offline(&mut self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 + } +} - match rx.await { - Ok(v) => v, - // The channel can only be closed if the network worker no longer exists. 
If the - // network worker no longer exists, then all connections to `target` are necessarily - // closed, and we legitimately report this situation as a "ConnectionClosed". - Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), - } +impl sc_consensus::JustificationSyncLink for NetworkService { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + Self::request_justification(self, hash, number); } - fn start_request( - &self, - target: PeerId, - protocol: ProtocolName, - request: Vec, - tx: oneshot::Sender, RequestFailure>>, - connect: IfDisconnected, - ) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { - target, - protocol: protocol.into(), - request, - pending_response: tx, - connect, - }); + fn clear_justification_requests(&self) { + Self::clear_justification_requests(self); } } -impl NetworkBlock> for NetworkService +impl NetworkStateInfo for NetworkService where - B: BlockT + 'static, + B: sp_runtime::traits::Block, H: ExHashT, { - fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().clone() } - fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId { + self.local_peer_id } } @@ -1156,48 +1369,50 @@ pub struct NotificationSender { sink: NotificationsSink, /// Name of the protocol on the wire. - protocol_name: ProtocolName, + protocol_name: Cow<'static, str>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. 
notification_size_metric: Option, } -#[async_trait::async_trait] -impl NotificationSenderT for NotificationSender { - async fn ready( - &self, - ) -> Result, NotificationSenderError> { - Ok(Box::new(NotificationSenderReady { +impl NotificationSender { + /// Returns a future that resolves when the `NotificationSender` is ready to send a + /// notification. + pub async fn ready(&self) -> Result, NotificationSenderError> { + Ok(NotificationSenderReady { ready: match self.sink.reserve_notification().await { - Ok(r) => Some(r), + Ok(r) => r, Err(()) => return Err(NotificationSenderError::Closed), }, peer_id: self.sink.peer_id(), protocol_name: &self.protocol_name, notification_size_metric: self.notification_size_metric.clone(), - })) + }) } } /// Reserved slot in the notifications buffer, ready to accept data. #[must_use] pub struct NotificationSenderReady<'a> { - ready: Option>, + ready: Ready<'a>, /// Target of the notification. peer_id: &'a PeerId, /// Name of the protocol on the wire. - protocol_name: &'a ProtocolName, + protocol_name: &'a Cow<'static, str>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notification_size_metric: Option, } -impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { - fn send(&mut self, notification: Vec) -> Result<(), NotificationSenderError> { +impl<'a> NotificationSenderReady<'a> { + /// Consumes this slots reservation and actually queues the notification. + pub fn send(self, notification: impl Into>) -> Result<(), NotificationSenderError> { + let notification = notification.into(); + if let Some(notification_size_metric) = &self.notification_size_metric { notification_size_metric.observe(notification.len() as f64); } @@ -1209,18 +1424,32 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { ); trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); - self.ready - .take() - .ok_or(NotificationSenderError::Closed)? 
- .send(notification) - .map_err(|()| NotificationSenderError::Closed) + self.ready.send(notification).map_err(|()| NotificationSenderError::Closed) } } +/// Error returned by [`NetworkService::send_notification`]. +#[derive(Debug, thiserror::Error)] +pub enum NotificationSenderError { + /// The notification receiver has been closed, usually because the underlying connection + /// closed. + /// + /// Some of the notifications most recently sent may not have been received. However, + /// the peer may still be connected and a new `NotificationSender` for the same + /// protocol obtained from [`NetworkService::notification_sender`]. + #[error("The notification receiver has been closed")] + Closed, + /// Protocol name hasn't been registered. + #[error("Protocol name hasn't been registered")] + BadProtocol, +} + /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. -enum ServiceToWorkerMsg { +enum ServiceToWorkerMsg { + PropagateTransaction(H), + PropagateTransactions, RequestJustification(B::Hash, NumberFor), ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), @@ -1231,15 +1460,16 @@ enum ServiceToWorkerMsg { AddReserved(PeerId), RemoveReserved(PeerId), SetReserved(HashSet), - SetPeersetReserved(ProtocolName, HashSet), - AddSetReserved(ProtocolName, PeerId), - RemoveSetReserved(ProtocolName, PeerId), - AddToPeersSet(ProtocolName, PeerId), - RemoveFromPeersSet(ProtocolName, PeerId), + SetPeersetReserved(Cow<'static, str>, HashSet), + AddSetReserved(Cow<'static, str>, PeerId), + RemoveSetReserved(Cow<'static, str>, PeerId), + AddToPeersSet(Cow<'static, str>, PeerId), + RemoveFromPeersSet(Cow<'static, str>, PeerId), + SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), Request { target: PeerId, - protocol: ProtocolName, + protocol: Cow<'static, str>, request: Vec, pending_response: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, @@ -1250,7 +1480,7 @@ enum 
ServiceToWorkerMsg { NetworkState { pending_response: oneshot::Sender>, }, - DisconnectPeer(PeerId, ProtocolName), + DisconnectPeer(PeerId, Cow<'static, str>), NewBestBlockImported(B::Hash, NumberFor), } @@ -1262,7 +1492,13 @@ pub struct NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. external_addresses: Arc>>, @@ -1277,7 +1513,7 @@ where /// The import queue that was passed at initialization. import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. - from_service: TracingUnboundedReceiver>, + from_service: TracingUnboundedReceiver>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. @@ -1286,26 +1522,22 @@ where boot_node_ids: Arc>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. - peers_notifications_sinks: Arc>>, - /// Protocol name used to send out block requests via - /// [`crate::request_responses::RequestResponsesBehaviour`]. - block_request_protocol_name: ProtocolName, - /// Protocol name used to send out state requests via - /// [`crate::request_responses::RequestResponsesBehaviour`]. - state_request_protocol_name: ProtocolName, - /// Protocol name used to send out warp sync requests via - /// [`crate::request_responses::RequestResponsesBehaviour`]. - warp_sync_protocol_name: Option, - /// Marker to pin the `H` generic. Serves no purpose except to not break backwards - /// compatibility. - _marker: PhantomData, + peers_notifications_sinks: Arc), NotificationsSink>>>, + /// Controller for the handler of incoming and outgoing transactions. 
+ tx_handler_controller: transactions::TransactionsHandlerController, } impl Future for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { type Output = (); @@ -1338,6 +1570,7 @@ where Poll::Ready(None) => return Poll::Ready(()), Poll::Pending => break, }; + match msg { ServiceToWorkerMsg::AnnounceBlock(hash, data) => this .network_service @@ -1354,6 +1587,10 @@ where .behaviour_mut() .user_protocol_mut() .clear_justification_requests(), + ServiceToWorkerMsg::PropagateTransaction(hash) => + this.tx_handler_controller.propagate_transaction(hash), + ServiceToWorkerMsg::PropagateTransactions => + this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1405,6 +1642,11 @@ where .behaviour_mut() .user_protocol_mut() .remove_from_peers_set(protocol, peer_id), + ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), ServiceToWorkerMsg::Request { target, @@ -1431,7 +1673,7 @@ where .network_service .behaviour_mut() .user_protocol_mut() - .disconnect_peer(&who, protocol_name), + .disconnect_peer(&who, &protocol_name), ServiceToWorkerMsg::NewBestBlockImported(hash, number) => this .network_service .behaviour_mut() @@ -1474,84 +1716,6 @@ where } this.import_queue.import_justifications(origin, hash, nb, justifications); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockRequest { - target, - request, - pending_response, - })) => { - match this - .network_service - .behaviour() - .user_protocol() - .encode_block_request(&request) - { - Ok(data) => { - this.network_service.behaviour_mut().send_request( - 
&target, - &this.block_request_protocol_name, - data, - pending_response, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - request, err - ); - }, - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::StateRequest { - target, - request, - pending_response, - })) => { - match this - .network_service - .behaviour() - .user_protocol() - .encode_state_request(&request) - { - Ok(data) => { - this.network_service.behaviour_mut().send_request( - &target, - &this.state_request_protocol_name, - data, - pending_response, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: "sync", - "Failed to encode state request {:?}: {:?}", - request, err - ); - }, - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::WarpSyncRequest { - target, - request, - pending_response, - })) => match &this.warp_sync_protocol_name { - Some(name) => this.network_service.behaviour_mut().send_request( - &target, - &name, - request.encode(), - pending_response, - IfDisconnected::ImmediateError, - ), - None => { - log::warn!( - target: "sync", - "Trying to send warp sync request when no protocol is configured {:?}", - request, - ); - }, - }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, @@ -1627,51 +1791,14 @@ where }, } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { - peer, - changes, - })) => - for change in changes { - this.network_service.behaviour().user_protocol().report_peer(peer, change); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::PeerIdentify { - peer_id, - info: - IdentifyInfo { - protocol_version, - agent_version, - mut listen_addrs, - protocols, - .. 
- }, - })) => { - if listen_addrs.len() > 30 { - debug!( - target: "sub-libp2p", - "Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}", - peer_id, protocol_version, agent_version - ); - listen_addrs.truncate(30); - } - for addr in listen_addrs { - this.network_service - .behaviour_mut() - .add_self_reported_address_to_dht(&peer_id, &protocols, addr); - } - this.network_service - .behaviour_mut() - .user_protocol_mut() - .add_default_set_discovered_nodes(iter::once(peer_id)); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Discovered(peer_id))) => { - this.network_service - .behaviour_mut() - .user_protocol_mut() - .add_default_set_discovered_nodes(iter::once(peer_id)); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted( + protocol, + ))) => if let Some(metrics) = this.metrics.as_ref() { - metrics.kademlia_random_queries_total.inc(); + metrics + .kademlia_random_queries_total + .with_label_values(&[protocol.as_ref()]) + .inc(); }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, @@ -1792,9 +1919,6 @@ where this.event_streams.send(Event::Dht(event)); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::None)) => { - // Ignored event from lower layers. 
- }, Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, @@ -1834,10 +1958,14 @@ where let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", Some(ConnectionError::Handler(EitherError::A(EitherError::A( - EitherError::B(EitherError::A(PingFailure::Timeout)), + EitherError::A(EitherError::B(EitherError::A( + PingFailure::Timeout, + ))), )))) => "ping-timeout", Some(ConnectionError::Handler(EitherError::A(EitherError::A( - EitherError::A(NotifsHandlerError::SyncNotificationsClogged), + EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged, + )), )))) => "sync-notifications-clogged", Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", @@ -1879,7 +2007,7 @@ where if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { - warn!( + error!( "💔 The bootnode you want to connect to at `{}` provided a different peer ID `{}` than the one you expect `{}`.", address, obtained, @@ -2003,32 +2131,39 @@ where *this.external_addresses.lock() = external_addresses; } - let is_major_syncing = this - .network_service - .behaviour_mut() - .user_protocol_mut() - .sync_state() - .state - .is_major_syncing(); + let is_major_syncing = + match this.network_service.behaviour_mut().user_protocol_mut().sync_state().state { + SyncState::Idle => false, + SyncState::Downloading => true, + }; + + this.tx_handler_controller.set_gossip_enabled(!is_major_syncing); this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { - if let Some(buckets) = this.network_service.behaviour_mut().num_entries_per_kbucket() { + for (proto, buckets) in this.network_service.behaviour_mut().num_entries_per_kbucket() { for (lower_ilog2_bucket_bound, num_entries) in buckets { metrics .kbuckets_num_nodes - .with_label_values(&[&lower_ilog2_bucket_bound.to_string()]) + .with_label_values(&[proto.as_ref(), 
&lower_ilog2_bucket_bound.to_string()]) .set(num_entries as u64); } } - if let Some(num_entries) = this.network_service.behaviour_mut().num_kademlia_records() { - metrics.kademlia_records_count.set(num_entries as u64); + for (proto, num_entries) in this.network_service.behaviour_mut().num_kademlia_records() + { + metrics + .kademlia_records_count + .with_label_values(&[proto.as_ref()]) + .set(num_entries as u64); } - if let Some(num_entries) = + for (proto, num_entries) in this.network_service.behaviour_mut().kademlia_records_total_size() { - metrics.kademlia_records_sizes_total.set(num_entries as u64); + metrics + .kademlia_records_sizes_total + .with_label_values(&[proto.as_ref()]) + .set(num_entries as u64); } metrics .peerset_num_discovered @@ -2048,7 +2183,13 @@ impl Unpin for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { } @@ -2056,7 +2197,13 @@ where struct NetworkLink<'a, B, Client> where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { protocol: &'a mut Swarm>, } @@ -2064,7 +2211,13 @@ where impl<'a, B, Client> Link for NetworkLink<'a, B, Client> where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { fn blocks_processed( &mut self, diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index db1b6f7f6500d..4b63df00b8d66 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -59,9 +59,9 @@ pub struct Metrics { pub incoming_connections_total: Counter, pub issued_light_requests: Counter, pub kademlia_query_duration: HistogramVec, - pub kademlia_random_queries_total: Counter, - pub kademlia_records_count: Gauge, - pub 
kademlia_records_sizes_total: Gauge, + pub kademlia_random_queries_total: CounterVec, + pub kademlia_records_count: GaugeVec, + pub kademlia_records_sizes_total: GaugeVec, pub kbuckets_num_nodes: GaugeVec, pub listeners_local_addresses: Gauge, pub listeners_errors_total: Counter, @@ -138,24 +138,33 @@ impl Metrics { }, &["type"] )?, registry)?, - kademlia_random_queries_total: prometheus::register(Counter::new( - "substrate_sub_libp2p_kademlia_random_queries_total", - "Number of random Kademlia queries started", + kademlia_random_queries_total: prometheus::register(CounterVec::new( + Opts::new( + "substrate_sub_libp2p_kademlia_random_queries_total", + "Number of random Kademlia queries started" + ), + &["protocol"] )?, registry)?, - kademlia_records_count: prometheus::register(Gauge::new( - "substrate_sub_libp2p_kademlia_records_count", - "Number of records in the Kademlia records store", + kademlia_records_count: prometheus::register(GaugeVec::new( + Opts::new( + "substrate_sub_libp2p_kademlia_records_count", + "Number of records in the Kademlia records store" + ), + &["protocol"] )?, registry)?, - kademlia_records_sizes_total: prometheus::register(Gauge::new( - "substrate_sub_libp2p_kademlia_records_sizes_total", - "Total size of all the records in the Kademlia records store", + kademlia_records_sizes_total: prometheus::register(GaugeVec::new( + Opts::new( + "substrate_sub_libp2p_kademlia_records_sizes_total", + "Total size of all the records in the Kademlia records store" + ), + &["protocol"] )?, registry)?, kbuckets_num_nodes: prometheus::register(GaugeVec::new( Opts::new( "substrate_sub_libp2p_kbuckets_num_nodes", "Number of nodes per kbucket per Kademlia instance" ), - &["lower_ilog2_bucket_bound"] + &["protocol", "lower_ilog2_bucket_bound"] )?, registry)?, listeners_local_addresses: prometheus::register(Gauge::new( "substrate_sub_libp2p_listeners_local_addresses", diff --git a/client/network/src/service/out_events.rs 
b/client/network/src/service/out_events.rs index 4144d7f19551e..c95b46af4cefa 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -31,10 +31,11 @@ //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the //! collection. +use crate::Event; + use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; -use sc_network_common::protocol::event::Event; use std::{ cell::RefCell, fmt, diff --git a/client/network/common/src/service/signature.rs b/client/network/src/service/signature.rs similarity index 96% rename from client/network/common/src/service/signature.rs rename to client/network/src/service/signature.rs index 602ef3d82979a..d21d28a3007b5 100644 --- a/client/network/common/src/service/signature.rs +++ b/client/network/src/service/signature.rs @@ -18,10 +18,7 @@ // // If you read this, you are very thorough, congratulations. -use libp2p::{ - identity::{error::SigningError, Keypair, PublicKey}, - PeerId, -}; +use super::*; /// A result of signing a message with a network identity. Since `PeerId` is potentially a hash of a /// `PublicKey`, you need to reveal the `PublicKey` next to the signature, so the verifier can check diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests.rs similarity index 50% rename from client/network/src/service/tests/service.rs rename to client/network/src/service/tests.rs index 90945fdcef2cf..de474ee8fe4d0 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests.rs @@ -16,24 +16,146 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{config, service::tests::TestNetworkBuilder, NetworkService}; +use crate::{config, Event, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; -use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, - protocol::event::Event, - service::{NetworkNotification, NetworkPeers, NetworkStateInfo}, +use sc_network_common::config::ProtocolId; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + ChainSync, }; -use std::{sync::Arc, time::Duration}; +use sp_consensus::block_validation::DefaultBlockAnnounceValidator; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use std::{borrow::Cow, sync::Arc, time::Duration}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::Hash, >; -const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; -const PROTOCOL_NAME: &str = "/foo"; +/// Builds a full node to be used for testing. Returns the node service and its associated events +/// stream. +/// +/// > **Note**: We return the events stream in order to not possibly lose events between the +/// > construction of the service and the moment the events stream is grabbed. 
+fn build_test_full_node( + network_config: config::NetworkConfiguration, +) -> (Arc, impl Stream) { + let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + + #[async_trait::async_trait] + impl sc_consensus::Verifier for PassThroughVerifier { + async fn verify( + &mut self, + mut block: sc_consensus::BlockImportParams, + ) -> Result< + ( + sc_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = block + .header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( + b"babe", + )) + }) + }) + .map(|blob| { + vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] + }); + + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) + } + } + + let import_queue = Box::new(sc_consensus::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let protocol_id = ProtocolId::from("/test-protocol-name"); + + let block_request_protocol_config = { + let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let state_request_protocol_config = { + let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let chain_sync = ChainSync::new( + match network_config.sync_mode { + config::SyncMode::Full => 
sc_network_common::sync::SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + network_config.max_parallel_downloads, + None, + ) + .unwrap(); + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + transactions_handler_executor: Box::new(|task| { + async_std::task::spawn(task); + }), + network_config, + chain: client.clone(), + transaction_pool: Arc::new(config::EmptyTransactionPool), + protocol_id, + import_queue, + chain_sync: Box::new(chain_sync), + metrics_registry: None, + block_request_protocol_config, + state_request_protocol_config, + light_client_request_protocol_config, + warp_sync_protocol_config: None, + }) + .unwrap(); + + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) +} + +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. 
@@ -45,21 +167,35 @@ fn build_nodes_one_proto() -> ( ) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, events_stream1) = TestNetworkBuilder::new() - .with_listen_addresses(vec![listen_addr.clone()]) - .build() - .start_network(); + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); - let (node2, events_stream2) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id(), - }], - ..Default::default() - }) - .build() - .start_network(); + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + ..Default::default() + }, + }], + listen_addresses: vec![], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); (node1, events_stream1, node2, events_stream2) } @@ -74,15 +210,15 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. 
for _ in 0..(rand::random::() % 5) { node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), + node2.local_peer_id().clone(), + PROTOCOL_NAME, b"hello world".to_vec(), ); } for _ in 0..(rand::random::() % 5) { node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), + node1.local_peer_id().clone(), + PROTOCOL_NAME, b"hello world".to_vec(), ); } @@ -108,25 +244,25 @@ fn notifications_state_consistent() { // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), + node2.local_peer_id().clone(), + PROTOCOL_NAME, b"hello world".to_vec(), ); } if rand::random::() % 5 >= 3 { node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), + node1.local_peer_id().clone(), + PROTOCOL_NAME, b"hello world".to_vec(), ); } // Also randomly disconnect the two nodes from time to time. if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id(), PROTOCOL_NAME.into()); + node1.disconnect_peer(node2.local_peer_id().clone(), PROTOCOL_NAME); } if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); + node2.disconnect_peer(node1.local_peer_id().clone(), PROTOCOL_NAME); } // Grab next event from either `events_stream1` or `events_stream2`. @@ -150,55 +286,55 @@ fn notifications_state_consistent() { future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { + if protocol == PROTOCOL_NAME { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; - assert_eq!(remote, node2.local_peer_id()); + assert_eq!(remote, *node2.local_peer_id()); }, future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. 
}) => - if protocol == PROTOCOL_NAME.into() { + if protocol == PROTOCOL_NAME { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; - assert_eq!(remote, node1.local_peer_id()); + assert_eq!(remote, *node1.local_peer_id()); }, future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { + if protocol == PROTOCOL_NAME { assert!(node1_to_node2_open); node1_to_node2_open = false; - assert_eq!(remote, node2.local_peer_id()); + assert_eq!(remote, *node2.local_peer_id()); }, future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { + if protocol == PROTOCOL_NAME { assert!(node2_to_node1_open); node2_to_node1_open = false; - assert_eq!(remote, node1.local_peer_id()); + assert_eq!(remote, *node1.local_peer_id()); }, future::Either::Left(Event::NotificationsReceived { remote, .. }) => { assert!(node1_to_node2_open); - assert_eq!(remote, node2.local_peer_id()); + assert_eq!(remote, *node2.local_peer_id()); if rand::random::() % 5 >= 4 { node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), + node2.local_peer_id().clone(), + PROTOCOL_NAME, b"hello world".to_vec(), ); } }, future::Either::Right(Event::NotificationsReceived { remote, .. 
}) => { assert!(node2_to_node1_open); - assert_eq!(remote, node1.local_peer_id()); + assert_eq!(remote, *node1.local_peer_id()); if rand::random::() % 5 >= 4 { node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), + node1.local_peer_id().clone(), + PROTOCOL_NAME, b"hello world".to_vec(), ); } @@ -216,33 +352,46 @@ fn notifications_state_consistent() { }); } -#[async_std::test] -async fn lots_of_incoming_peers_works() { +#[test] +fn lots_of_incoming_peers_works() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (main_node, _) = TestNetworkBuilder::new() - .with_listen_addresses(vec![listen_addr.clone()]) - .with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() }) - .build() - .start_network(); + let (main_node, _) = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, + }], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); - let main_node_peer_id = main_node.local_peer_id(); + let main_node_peer_id = *main_node.local_peer_id(); // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. 
let mut background_tasks_to_wait = Vec::new(); for _ in 0..32 { - let (_dialing_node, event_stream) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr.clone(), - peer_id: main_node_peer_id, - }], - ..Default::default() - }) - .build() - .start_network(); + let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id, + }], + ..Default::default() + }, + }], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); background_tasks_to_wait.push(async_std::task::spawn(async move { // Create a dummy timer that will "never" fire, and that will be overwritten when we @@ -277,7 +426,7 @@ async fn lots_of_incoming_peers_works() { })); } - future::join_all(background_tasks_to_wait).await; + futures::executor::block_on(async move { future::join_all(background_tasks_to_wait).await }); } #[test] @@ -298,7 +447,7 @@ fn notifications_back_pressure() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. } => for message in messages { - assert_eq!(message.0, PROTOCOL_NAME.into()); + assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; }, @@ -322,13 +471,8 @@ fn notifications_back_pressure() { // Sending! 
for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id, PROTOCOL_NAME.into()).unwrap(); - notif - .ready() - .await - .unwrap() - .send(format!("hello #{}", num).into_bytes()) - .unwrap(); + let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); + notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); } receiver.await; @@ -339,42 +483,48 @@ fn notifications_back_pressure() { fn fallback_name_working() { // Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether // they can connect. - const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME"; + + const NEW_PROTOCOL_NAME: Cow<'static, str> = + Cow::Borrowed("/new-shiny-protocol-that-isnt-PROTOCOL_NAME"); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, mut events_stream1) = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: NEW_PROTOCOL_NAME.into(), - fallback_names: vec![PROTOCOL_NAME.into()], - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }], - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }) - .build() - .start_network(); - - let (_, mut events_stream2) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id(), - }], - ..Default::default() - }) - .build() - .start_network(); + + let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: NEW_PROTOCOL_NAME.clone(), + fallback_names: vec![PROTOCOL_NAME], + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }], + listen_addresses: vec![listen_addr.clone()], + transport: 
config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + ..Default::default() + }, + }], + listen_addresses: vec![], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); let receiver = async_std::task::spawn(async move { // Wait for the `NotificationStreamOpened`. loop { match events_stream2.next().await.unwrap() { Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { - assert_eq!(protocol, PROTOCOL_NAME.into()); + assert_eq!(protocol, PROTOCOL_NAME); assert_eq!(negotiated_fallback, None); break }, @@ -388,9 +538,9 @@ fn fallback_name_working() { loop { match events_stream1.next().await.unwrap() { Event::NotificationStreamOpened { protocol, negotiated_fallback, .. 
} - if protocol == NEW_PROTOCOL_NAME.into() => + if protocol == NEW_PROTOCOL_NAME => { - assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into())); + assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); break }, _ => {}, @@ -401,60 +551,16 @@ fn fallback_name_working() { }); } -// Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement -// protocol name and verify that `SyncDisconnected` event is emitted -#[async_std::test] -async fn disconnect_sync_peer_using_block_announcement_protocol_name() { - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); - - async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { - let mut notif_received = false; - let mut sync_received = false; - - while !notif_received || !sync_received { - match stream.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => notif_received = true, - Event::SyncConnected { .. } => sync_received = true, - _ => {}, - }; - } - } - - wait_for_events(&mut events_stream1).await; - wait_for_events(&mut events_stream2).await; - - // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted - node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); - assert!(std::matches!( - events_stream2.next().await, - Some(Event::NotificationStreamClosed { .. }) - )); - let _ = events_stream2.next().await; // ignore the reopen event - - // now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is - // emitted - node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); - assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. 
}))); -} - #[test] #[should_panic(expected = "don't match the transport")] fn ensure_listen_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] @@ -462,122 +568,82 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { fn ensure_listen_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] #[should_panic(expected = "don't match the transport")] fn ensure_boot_node_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let boot_node = MultiaddrWithPeerId { + let boot_node = config::MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], peer_id: PeerId::random(), }; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: 
vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - boot_nodes: vec![boot_node], - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + boot_nodes: vec![boot_node], + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] #[should_panic(expected = "don't match the transport")] fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let boot_node = MultiaddrWithPeerId { + let boot_node = config::MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Memory(rand::random::())], peer_id: PeerId::random(), }; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - boot_nodes: vec![boot_node], - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + boot_nodes: vec![boot_node], + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] #[should_panic(expected = "don't match the transport")] fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let reserved_node = MultiaddrWithPeerId { + let reserved_node = config::MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], peer_id: PeerId::random(), }; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - 
transport: TransportConfig::MemoryOnly, - default_peers_set: SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + default_peers_set: config::SetConfig { + reserved_nodes: vec![reserved_node], + ..Default::default() + }, + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] #[should_panic(expected = "don't match the transport")] fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let reserved_node = MultiaddrWithPeerId { + let reserved_node = config::MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Memory(rand::random::())], peer_id: PeerId::random(), }; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - default_peers_set: SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + default_peers_set: config::SetConfig { + reserved_nodes: vec![reserved_node], + ..Default::default() + }, + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] @@ -586,20 +652,12 @@ fn ensure_public_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let public_address = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let _ = 
TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - public_addresses: vec![public_address], - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + public_addresses: vec![public_address], + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } #[test] @@ -608,17 +666,9 @@ fn ensure_public_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; let public_address = config::build_multiaddr![Memory(rand::random::())]; - let _ = TestNetworkBuilder::new() - .with_config(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - public_addresses: vec![public_address], - ..config::NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ) - }) - .build() - .start_network(); + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + public_addresses: vec![public_address], + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); } diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs deleted file mode 100644 index 21149459413f4..0000000000000 --- a/client/network/src/service/tests/chain_sync.rs +++ /dev/null @@ -1,402 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use crate::{ - config, - service::tests::{TestNetworkBuilder, BLOCK_ANNOUNCE_PROTO_NAME}, -}; - -use futures::prelude::*; -use libp2p::PeerId; -use sc_block_builder::BlockBuilderProvider; -use sc_client_api::HeaderBackend; -use sc_consensus::JustificationSyncLink; -use sc_network_common::{ - config::{MultiaddrWithPeerId, SetConfig}, - protocol::event::Event, - service::NetworkSyncForkRequest, - sync::{SyncState, SyncStatus}, -}; -use sc_network_sync::{mock::MockChainSync, service::mock::MockChainSyncInterface, ChainSync}; -use sp_core::H256; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as _}, -}; -use std::{ - iter, - sync::{Arc, RwLock}, - task::Poll, - time::Duration, -}; -use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; - -fn set_default_expecations_no_peers( - chain_sync: &mut MockChainSync, -) { - chain_sync.expect_block_requests().returning(|| Box::new(iter::empty())); - chain_sync.expect_state_request().returning(|| None); - chain_sync.expect_justification_requests().returning(|| Box::new(iter::empty())); - chain_sync.expect_warp_sync_request().returning(|| None); - chain_sync.expect_poll().returning(|_| Poll::Pending); - chain_sync.expect_status().returning(|| SyncStatus { - state: SyncState::Idle, - 
best_seen_block: None, - num_peers: 0u32, - queued_blocks: 0u32, - state_sync: None, - warp_sync: None, - }); -} - -#[async_std::test] -async fn normal_network_poll_no_peers() { - // build `ChainSync` and set default expectations for it - let mut chain_sync = - Box::new(MockChainSync::::new()); - set_default_expecations_no_peers(&mut chain_sync); - - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // poll the network once - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn request_justification() { - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - // build `ChainSync` and verify that call to `request_justification()` is made - let mut chain_sync = - Box::new(MockChainSync::::new()); - - let hash = H256::random(); - let number = 1337u64; - - chain_sync - .expect_request_justification() - .withf(move |in_hash, in_number| &hash == in_hash && &number == in_number) - .once() - .returning(|_, _| ()); - - set_default_expecations_no_peers(&mut chain_sync); - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // send "request justifiction" message and poll the network - network.service().request_justification(&hash, number); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn clear_justification_requests() { - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - 
Box::new(MockChainSyncInterface::::new()); - - // build `ChainSync` and verify that call to `clear_justification_requests()` is made - let mut chain_sync = - Box::new(MockChainSync::::new()); - - chain_sync.expect_clear_justification_requests().once().returning(|| ()); - - set_default_expecations_no_peers(&mut chain_sync); - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // send "request justifiction" message and poll the network - network.service().clear_justification_requests(); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn set_sync_fork_request() { - // build `ChainSync` and set default expectations for it - let mut chain_sync = - Box::new(MockChainSync::::new()); - set_default_expecations_no_peers(&mut chain_sync); - - // build `ChainSyncInterface` provider and verify that the `set_sync_fork_request()` - // call is delegated to `ChainSyncInterface` (which eventually forwards it to `ChainSync`) - let mut chain_sync_service = - MockChainSyncInterface::::new(); - - let hash = H256::random(); - let number = 1337u64; - let peers = (0..3).map(|_| PeerId::random()).collect::>(); - let copy_peers = peers.clone(); - - chain_sync_service - .expect_set_sync_fork_request() - .withf(move |in_peers, in_hash, in_number| { - &peers == in_peers && &hash == in_hash && &number == in_number - }) - .once() - .returning(|_, _, _| ()); - - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, Box::new(chain_sync_service))) - .build(); - - // send "set sync fork request" message and poll the network - network.service().set_sync_fork_request(copy_peers, hash, number); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn on_block_finalized() { - let client = 
Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - // build `ChainSync` and verify that call to `on_block_finalized()` is made - let mut chain_sync = - Box::new(MockChainSync::::new()); - - let at = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap().hash(); - let block = client - .new_block_at(&BlockId::Hash(at), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - let header = block.header.clone(); - let block_number = *header.number(); - let hash = block.hash(); - - chain_sync - .expect_on_block_finalized() - .withf(move |in_hash, in_number| &hash == in_hash && &block_number == in_number) - .once() - .returning(|_, _| ()); - - set_default_expecations_no_peers(&mut chain_sync); - let mut network = TestNetworkBuilder::new() - .with_client(client) - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // send "set sync fork request" message and poll the network - network.network().on_block_finalized(hash, header); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -// report from mock import queue that importing a justification was not successful -// and verify that connection to the peer is closed -#[async_std::test] -async fn invalid_justification_imported() { - struct DummyImportQueue( - Arc< - RwLock< - Option<( - PeerId, - substrate_test_runtime_client::runtime::Hash, - sp_runtime::traits::NumberFor, - )>, - >, - >, - ); - - impl sc_consensus::ImportQueue for DummyImportQueue { - fn import_blocks( - &mut self, - _origin: sp_consensus::BlockOrigin, - _blocks: Vec< - sc_consensus::IncomingBlock, - >, - ) { - } - - fn import_justifications( - &mut self, - _who: sc_consensus::import_queue::RuntimeOrigin, - _hash: 
substrate_test_runtime_client::runtime::Hash, - _number: sp_runtime::traits::NumberFor, - _justifications: sp_runtime::Justifications, - ) { - } - - fn poll_actions( - &mut self, - _cx: &mut futures::task::Context, - link: &mut dyn sc_consensus::Link, - ) { - if let Some((peer, hash, number)) = *self.0.read().unwrap() { - link.justification_imported(peer, &hash, number, false); - } - } - } - - let justification_info = Arc::new(RwLock::new(None)); - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (service1, mut event_stream1) = TestNetworkBuilder::new() - .with_import_queue(Box::new(DummyImportQueue(justification_info.clone()))) - .with_listen_addresses(vec![listen_addr.clone()]) - .build() - .start_network(); - - let (service2, mut event_stream2) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: service1.local_peer_id, - }], - ..Default::default() - }) - .build() - .start_network(); - - async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { - let mut notif_received = false; - let mut sync_received = false; - while !notif_received || !sync_received { - match stream.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => notif_received = true, - Event::SyncConnected { .. } => sync_received = true, - _ => {}, - }; - } - } - - wait_for_events(&mut event_stream1).await; - wait_for_events(&mut event_stream2).await; - - { - let mut info = justification_info.write().unwrap(); - *info = Some((service2.local_peer_id, H256::random(), 1337u64)); - } - - let wait_disconnection = async { - while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. 
})) {} - }; - - if async_std::future::timeout(Duration::from_secs(5), wait_disconnection) - .await - .is_err() - { - panic!("did not receive disconnection event in time"); - } -} - -#[async_std::test] -async fn disconnect_peer_using_chain_sync_handle() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (chain_sync_network_provider, chain_sync_network_handle) = - sc_network_sync::service::network::NetworkServiceProvider::new(); - let handle_clone = chain_sync_network_handle.clone(); - - let (chain_sync, chain_sync_service) = ChainSync::new( - sc_network_common::sync::SyncMode::Full, - client.clone(), - Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), - 1u32, - None, - chain_sync_network_handle.clone(), - ) - .unwrap(); - - let (node1, mut event_stream1) = TestNetworkBuilder::new() - .with_listen_addresses(vec![listen_addr.clone()]) - .with_chain_sync((Box::new(chain_sync), chain_sync_service)) - .with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle)) - .with_client(client.clone()) - .build() - .start_network(); - - let (node2, mut event_stream2) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id, - }], - ..Default::default() - }) - .with_client(client.clone()) - .build() - .start_network(); - - async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { - let mut notif_received = false; - let mut sync_received = false; - while !notif_received || !sync_received { - match stream.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => notif_received = true, - Event::SyncConnected { .. 
} => sync_received = true, - _ => {}, - }; - } - } - - wait_for_events(&mut event_stream1).await; - wait_for_events(&mut event_stream2).await; - - handle_clone.disconnect_peer(node2.local_peer_id, BLOCK_ANNOUNCE_PROTO_NAME.into()); - - let wait_disconnection = async { - while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {} - }; - - if async_std::future::timeout(Duration::from_secs(5), wait_disconnection) - .await - .is_err() - { - panic!("did not receive disconnection event in time"); - } -} diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs deleted file mode 100644 index ef25616a07b0d..0000000000000 --- a/client/network/src/service/tests/mod.rs +++ /dev/null @@ -1,323 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker}; - -use futures::prelude::*; -use libp2p::Multiaddr; -use sc_client_api::{BlockBackend, HeaderBackend}; -use sc_consensus::ImportQueue; -use sc_network_common::{ - config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, - TransportConfig, - }, - protocol::{event::Event, role::Roles}, - service::NetworkEventStream, - sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT}, -}; -use sc_network_light::light_client_requests::handler::LightClientRequestHandler; -use sc_network_sync::{ - block_request_handler::BlockRequestHandler, - service::network::{NetworkServiceHandle, NetworkServiceProvider}, - state_request_handler::StateRequestHandler, - ChainSync, -}; -use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; -use std::sync::Arc; -use substrate_test_runtime_client::{ - runtime::{Block as TestBlock, Hash as TestHash}, - TestClient, TestClientBuilder, TestClientBuilderExt as _, -}; - -#[cfg(test)] -mod chain_sync; -#[cfg(test)] -mod service; - -type TestNetworkWorker = NetworkWorker; -type TestNetworkService = NetworkService; - -const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; -const PROTOCOL_NAME: &str = "/foo"; - -struct TestNetwork { - network: TestNetworkWorker, -} - -impl TestNetwork { - pub fn new(network: TestNetworkWorker) -> Self { - Self { network } - } - - pub fn service(&self) -> &Arc { - &self.network.service() - } - - pub fn network(&mut self) -> &mut TestNetworkWorker { - &mut self.network - } - - pub fn start_network( - self, - ) -> (Arc, (impl Stream + std::marker::Unpin)) { - let worker = self.network; - let service = worker.service().clone(); - let event_stream = service.event_stream("test"); - - async_std::task::spawn(async move { - futures::pin_mut!(worker); - let _ = worker.await; - }); - - (service, event_stream) - } -} - -struct TestNetworkBuilder { - import_queue: Option>>, - client: Option>, - 
listen_addresses: Vec, - set_config: Option, - chain_sync: Option<(Box>, Box>)>, - chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>, - config: Option, -} - -impl TestNetworkBuilder { - pub fn new() -> Self { - Self { - import_queue: None, - client: None, - listen_addresses: Vec::new(), - set_config: None, - chain_sync: None, - chain_sync_network: None, - config: None, - } - } - - pub fn with_client(mut self, client: Arc) -> Self { - self.client = Some(client); - self - } - - pub fn with_config(mut self, config: config::NetworkConfiguration) -> Self { - self.config = Some(config); - self - } - - pub fn with_listen_addresses(mut self, addresses: Vec) -> Self { - self.listen_addresses = addresses; - self - } - - pub fn with_set_config(mut self, set_config: SetConfig) -> Self { - self.set_config = Some(set_config); - self - } - - pub fn with_chain_sync( - mut self, - chain_sync: (Box>, Box>), - ) -> Self { - self.chain_sync = Some(chain_sync); - self - } - - pub fn with_chain_sync_network( - mut self, - chain_sync_network: (NetworkServiceProvider, NetworkServiceHandle), - ) -> Self { - self.chain_sync_network = Some(chain_sync_network); - self - } - - pub fn with_import_queue(mut self, import_queue: Box>) -> Self { - self.import_queue = Some(import_queue); - self - } - - pub fn build(mut self) -> TestNetwork { - let client = self.client.as_mut().map_or( - Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), - |v| v.clone(), - ); - - let network_config = self.config.unwrap_or(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: self.set_config.unwrap_or_default(), - }], - listen_addresses: self.listen_addresses, - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); - - #[derive(Clone)] - struct 
PassThroughVerifier(bool); - - #[async_trait::async_trait] - impl sc_consensus::Verifier for PassThroughVerifier { - async fn verify( - &mut self, - mut block: sc_consensus::BlockImportParams, - ) -> Result< - ( - sc_consensus::BlockImportParams, - Option)>>, - ), - String, - > { - let maybe_keys = block - .header - .digest() - .log(|l| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( - b"babe", - )) - }) - }) - .map(|blob| { - vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] - }); - - block.finalized = self.0; - block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); - Ok((block, maybe_keys)) - } - } - - let import_queue = self.import_queue.unwrap_or(Box::new(sc_consensus::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - &sp_core::testing::TaskExecutor::new(), - None, - ))); - - let (chain_sync_network_provider, chain_sync_network_handle) = - self.chain_sync_network.unwrap_or(NetworkServiceProvider::new()); - - let (chain_sync, chain_sync_service) = self.chain_sync.unwrap_or({ - let (chain_sync, chain_sync_service) = ChainSync::new( - match network_config.sync_mode { - config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { - skip_proofs, - storage_chain_mode, - }, - config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, - client.clone(), - Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), - network_config.max_parallel_downloads, - None, - chain_sync_network_handle, - ) - .unwrap(); - - (Box::new(chain_sync), chain_sync_service) - }); - - let protocol_id = ProtocolId::from("test-protocol-name"); - let fork_id = Some(String::from("test-fork-id")); - - let block_request_protocol_config = { - let (handler, 
protocol_config) = - BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let state_request_protocol_config = { - let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, None, client.clone(), 50); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let light_client_request_protocol_config = { - let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, None, client.clone()); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let block_announce_config = NonDefaultSetConfig { - notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), - fallback_names: vec![], - max_notification_size: 1024 * 1024, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< - substrate_test_runtime_client::runtime::Block, - >::build( - Roles::from(&config::Role::Full), - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - ))), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - }; - - let worker = NetworkWorker::< - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::Hash, - substrate_test_runtime_client::TestClient, - >::new(config::Params { - block_announce_config, - role: config::Role::Full, - executor: None, - network_config, - chain: client.clone(), - protocol_id, - fork_id, - import_queue, - chain_sync, - chain_sync_service, - metrics_registry: None, - block_request_protocol_config, - state_request_protocol_config, - light_client_request_protocol_config, - warp_sync_protocol_config: None, - request_response_protocol_configs: Vec::new(), - }) - .unwrap(); - - let service = worker.service().clone(); - async_std::task::spawn(async move { - let _ = 
chain_sync_network_provider.run(service).await; - }); - - TestNetwork::new(worker) - } -} diff --git a/client/network/transactions/src/lib.rs b/client/network/src/transactions.rs similarity index 82% rename from client/network/transactions/src/lib.rs rename to client/network/src/transactions.rs index 5239a94ef23f3..043bdeff7ebfc 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/src/transactions.rs @@ -26,34 +26,49 @@ //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. -use crate::config::*; +use crate::{ + config::{self, TransactionImport, TransactionImportFuture, TransactionPool}, + error, + protocol::message, + service::NetworkService, + utils::{interval, LruHashSet}, + Event, ExHashT, ObservedRole, +}; + use codec::{Decode, Encode}; use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::{ - config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, - error, - protocol::{event::Event, role::ObservedRole, ProtocolName}, - service::{NetworkEventStream, NetworkNotification, NetworkPeers}, - utils::{interval, LruHashSet}, - ExHashT, -}; +use sc_network_common::config::ProtocolId; use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, collections::{hash_map::Entry, HashMap}, iter, num::NonZeroUsize, pin::Pin, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, task::Poll, + time, }; -pub mod config; +/// Interval at which we propagate transactions; +const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. 
+const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximum allowed size for a transactions notification. +const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; -/// A set of transactions. -pub type Transactions = Vec; +/// Maximum number of transaction validation request we keep at any moment. +const MAX_PENDING_TRANSACTIONS: usize = 8192; mod rep { use sc_peerset::ReputationChange as Rep; @@ -111,43 +126,26 @@ impl Future for PendingTransaction { /// Prototype for a [`TransactionsHandler`]. pub struct TransactionsHandlerPrototype { - protocol_name: ProtocolName, - fallback_protocol_names: Vec, + protocol_name: Cow<'static, str>, } impl TransactionsHandlerPrototype { /// Create a new instance. - pub fn new>( - protocol_id: ProtocolId, - genesis_hash: Hash, - fork_id: Option<&str>, - ) -> Self { - let genesis_hash = genesis_hash.as_ref(); - let protocol_name = if let Some(fork_id) = fork_id { - format!("/{}/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash), fork_id) - } else { - format!("/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash)) - }; - let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); - - Self { - protocol_name: protocol_name.into(), - fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), - } + pub fn new(protocol_id: ProtocolId) -> Self { + Self { protocol_name: format!("/{}/transactions/1", protocol_id.as_ref()).into() } } /// Returns the configuration of the set to put in the network configuration. 
- pub fn set_config(&self) -> NonDefaultSetConfig { - NonDefaultSetConfig { + pub fn set_config(&self) -> config::NonDefaultSetConfig { + config::NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), - fallback_names: self.fallback_protocol_names.clone(), + fallback_names: Vec::new(), max_notification_size: MAX_TRANSACTIONS_SIZE, - handshake: None, - set_config: SetConfig { + set_config: config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, + non_reserved_mode: config::NonReservedPeerMode::Deny, }, } } @@ -156,25 +154,23 @@ impl TransactionsHandlerPrototype { /// the behaviour of the handler while it's running. /// /// Important: the transactions handler is initially disabled and doesn't gossip transactions. - /// Gossiping is enabled when major syncing is done. - pub fn build< - B: BlockT + 'static, - H: ExHashT, - S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, - >( + /// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it. 
+ pub fn build( self, - service: S, + service: Arc>, transaction_pool: Arc>, metrics_registry: Option<&Registry>, - ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let event_stream = service.event_stream("transactions-handler"); + ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { + let event_stream = service.event_stream("transactions-handler").boxed(); let (to_handler, from_controller) = mpsc::unbounded(); + let gossip_enabled = Arc::new(AtomicBool::new(false)); let handler = TransactionsHandler { protocol_name: self.protocol_name, propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), + gossip_enabled: gossip_enabled.clone(), service, event_stream, peers: HashMap::new(), @@ -187,7 +183,7 @@ impl TransactionsHandlerPrototype { }, }; - let controller = TransactionsHandlerController { to_handler }; + let controller = TransactionsHandlerController { to_handler, gossip_enabled }; Ok((handler, controller)) } @@ -196,9 +192,15 @@ impl TransactionsHandlerPrototype { /// Controls the behaviour of a [`TransactionsHandler`] it is connected to. pub struct TransactionsHandlerController { to_handler: mpsc::UnboundedSender>, + gossip_enabled: Arc, } impl TransactionsHandlerController { + /// Controls whether transactions are being gossiped on the network. + pub fn set_gossip_enabled(&mut self, enabled: bool) { + self.gossip_enabled.store(enabled, Ordering::Relaxed); + } + /// You may call this when new transactions are imported by the transaction pool. /// /// All transactions will be fetched from the `TransactionPool` that was passed at @@ -222,12 +224,8 @@ enum ToHandler { } /// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. 
-pub struct TransactionsHandler< - B: BlockT + 'static, - H: ExHashT, - S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, -> { - protocol_name: ProtocolName, +pub struct TransactionsHandler { + protocol_name: Cow<'static, str>, /// Interval at which we call `propagate_transactions`. propagate_timeout: Pin + Send>>, /// Pending transactions verification tasks. @@ -238,12 +236,13 @@ pub struct TransactionsHandler< /// multiple times concurrently. pending_transactions_peers: HashMap>, /// Network service to use to send messages and manage peers. - service: S, + service: Arc>, /// Stream of networking events. event_stream: Pin + Send>>, // All connected peers peers: HashMap>, transaction_pool: Arc>, + gossip_enabled: Arc, from_controller: mpsc::UnboundedReceiver>, /// Prometheus metrics. metrics: Option, @@ -257,12 +256,7 @@ struct Peer { role: ObservedRole, } -impl TransactionsHandler -where - B: BlockT + 'static, - H: ExHashT, - S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, -{ +impl TransactionsHandler { /// Turns the [`TransactionsHandler`] into a future that should run forever and not be /// interrupted. 
pub async fn run(mut self) { @@ -344,9 +338,9 @@ where continue } - if let Ok(m) = - as Decode>::decode(&mut message.as_ref()) - { + if let Ok(m) = as Decode>::decode( + &mut message.as_ref(), + ) { self.on_transactions(remote, m); } else { warn!(target: "sub-libp2p", "Failed to decode transactions list"); @@ -360,10 +354,10 @@ where } /// Called when peer sends us new transactions - fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { - // Accept transactions only when node is not major syncing - if self.service.is_major_syncing() { - trace!(target: "sync", "{} Ignoring transactions while major syncing", who); + fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + trace!(target: "sync", "{} Ignoring transactions while disabled", who); return } @@ -412,12 +406,11 @@ where /// Propagate one transaction. pub fn propagate_transaction(&mut self, hash: &H) { - // Accept transactions only when node is not major syncing - if self.service.is_major_syncing() { + debug!(target: "sync", "Propagating transaction [{:?}]", hash); + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { return } - - debug!(target: "sync", "Propagating transaction [{:?}]", hash); if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); self.transaction_pool.on_broadcasted(propagated_to); @@ -464,11 +457,10 @@ where /// Call when we must propagate ready transactions to peers. 
fn propagate_transactions(&mut self) { - // Accept transactions only when node is not major syncing - if self.service.is_major_syncing() { + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { return } - debug!(target: "sync", "Propagating transactions"); let transactions = self.transaction_pool.transactions(); let propagated_to = self.do_propagate_transactions(&transactions); diff --git a/client/network/common/src/utils.rs b/client/network/src/utils.rs similarity index 100% rename from client/network/common/src/utils.rs rename to client/network/src/utils.rs diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index bcd6cf10275fe..3e3526146400a 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -14,17 +14,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.11" +prost-build = "0.10" [dependencies] -array-bytes = "4.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = [ + "derive", +] } futures = "0.3.21" -libp2p = "0.49.0" +libp2p = "0.46.1" log = "0.4.17" -lru = "0.8.1" -mockall = "0.11.2" -prost = "0.11" +lru = "0.7.5" +prost = "0.10" smallvec = "1.8.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } @@ -32,7 +32,6 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-arithmetic = { version = "5.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = 
"../../../primitives/consensus/common" } @@ -41,7 +40,6 @@ sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/final sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -async-std = { version = "1.11.0", features = ["attributes"] } quickcheck = { version = "1.0.3", default-features = false } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index b5f8b6b73bce9..30cbb32289d66 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -41,7 +41,6 @@ use sp_runtime::{ use std::{ cmp::min, hash::{Hash, Hasher}, - num::NonZeroUsize, sync::Arc, time::Duration, }; @@ -63,15 +62,9 @@ mod rep { } /// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. -pub fn generate_protocol_config>( - protocol_id: &ProtocolId, - genesis_hash: Hash, - fork_id: Option<&str>, -) -> ProtocolConfig { +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(genesis_hash, fork_id).into(), - fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) - .collect(), + name: generate_protocol_name(protocol_id).into(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(20), @@ -79,18 +72,8 @@ pub fn generate_protocol_config>( } } -/// Generate the block protocol name from the genesis hash and fork id. 
-fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!("/{}/{}/sync/2", array_bytes::bytes2hex("", genesis_hash), fork_id) - } else { - format!("/{}/sync/2", array_bytes::bytes2hex("", genesis_hash)) - } -} - -/// Generate the legacy block protocol name from chain specific protocol identifier. -fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the block protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/sync/2", protocol_id.as_ref()) } @@ -146,7 +129,6 @@ where /// Create a new [`BlockRequestHandler`]. pub fn new( protocol_id: &ProtocolId, - fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -154,20 +136,10 @@ where // number of peers. let (tx, request_receiver) = mpsc::channel(num_peer_hint); - let mut protocol_config = generate_protocol_config( - protocol_id, - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - fork_id, - ); + let mut protocol_config = generate_protocol_config(protocol_id); protocol_config.inbound_queue = Some(tx); - let capacity = - NonZeroUsize::new(num_peer_hint.max(1) * 2).expect("cache capacity is not zero"); - let seen_requests = LruCache::new(capacity); + let seen_requests = LruCache::new(num_peer_hint * 2); (Self { client, request_receiver, seen_requests }, protocol_config) } @@ -334,8 +306,11 @@ where let number = *header.number(); let hash = header.hash(); let parent_hash = *header.parent_hash(); - let justifications = - if get_justification { self.client.justifications(hash)? } else { None }; + let justifications = if get_justification { + self.client.justifications(&BlockId::Hash(hash))? 
+ } else { + None + }; let (justifications, justification, is_empty_justification) = if support_multiple_justifications { @@ -364,7 +339,7 @@ where }; let body = if get_body { - match self.client.block_body(hash)? { + match self.client.block_body(&BlockId::Hash(hash))? { Some(mut extrinsics) => extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), None => { @@ -377,7 +352,7 @@ where }; let indexed_body = if get_indexed_body { - match self.client.block_indexed_body(hash)? { + match self.client.block_indexed_body(&BlockId::Hash(hash))? { Some(transactions) => transactions, None => { log::trace!( @@ -406,20 +381,11 @@ where indexed_body, }; - let new_total_size = total_size + - block_data.body.iter().map(|ex| ex.len()).sum::() + - block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); - - // Send at least one block, but make sure to not exceed the limit. - if !blocks.is_empty() && new_total_size > MAX_BODY_BYTES { - break - } - - total_size = new_total_size; - + total_size += block_data.body.iter().map(|ex| ex.len()).sum::(); + total_size += block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); blocks.push(block_data); - if blocks.len() >= max_blocks as usize { + if blocks.len() >= max_blocks as usize || total_size > MAX_BODY_BYTES { break } diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index b8acd61a2009f..5fb1484675071 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -290,51 +290,51 @@ mod test { let peer2 = PeerId::random(); let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(1..41)); - assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(41..81)); - assert_eq!(bc.needed_blocks(peer2, 40, 150, 0, 1, 200), Some(81..121)); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81)); + 
assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121)); bc.clear_peer_download(&peer1); - bc.insert(41, blocks[41..81].to_vec(), peer1); + bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); assert_eq!(bc.ready_blocks(1), vec![]); - assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(121..151)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151)); bc.clear_peer_download(&peer0); - bc.insert(1, blocks[1..11].to_vec(), peer0); + bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); - assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(11..41)); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41)); assert_eq!( bc.ready_blocks(1), blocks[1..11] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) .collect::>() ); bc.clear_peer_download(&peer0); - bc.insert(11, blocks[11..41].to_vec(), peer0); + bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); let ready = bc.ready_blocks(12); assert_eq!( ready[..30], blocks[11..41] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) .collect::>()[..] ); assert_eq!( ready[30..], blocks[41..81] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) .collect::>()[..] 
); bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2, 40, 150, 80, 1, 200), Some(81..121)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121)); bc.clear_peer_download(&peer2); - bc.insert(81, blocks[81..121].to_vec(), peer2); + bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); bc.clear_peer_download(&peer1); - bc.insert(121, blocks[121..150].to_vec(), peer1); + bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); assert_eq!(bc.ready_blocks(80), vec![]); let ready = bc.ready_blocks(81); @@ -342,14 +342,14 @@ mod test { ready[..40], blocks[81..121] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer2) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }) .collect::>()[..] ); assert_eq!( ready[40..], blocks[121..150] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) .collect::>()[..] ); } @@ -365,10 +365,10 @@ mod test { bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0, 128, 10000, 000, 1, 200), Some(1..100)); - assert_eq!(bc.needed_blocks(peer0, 128, 10000, 600, 1, 200), None); // too far ahead + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100)); + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead assert_eq!( - bc.needed_blocks(peer0, 128, 10000, 600, 1, 200000), + bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), Some(100 + 128..100 + 128 + 128) ); } @@ -382,11 +382,11 @@ mod test { let blocks = generate_blocks(10); // count = 5, peer_best = 50, common = 39, max_parallel = 0, max_ahead = 200 - assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45)); + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45)); // got a response on the request for `40..45` bc.clear_peer_download(&peer); - 
bc.insert(40, blocks[..5].to_vec(), peer); + bc.insert(40, blocks[..5].to_vec(), peer.clone()); // our "node" started on a fork, with its current best = 47, which is > common let ready = bc.ready_blocks(48); @@ -394,11 +394,11 @@ mod test { ready, blocks[..5] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) }) .collect::>() ); - assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50)); + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); } #[test] @@ -410,12 +410,12 @@ mod test { let blocks = generate_blocks(10); // Request 2 ranges - assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45)); - assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50)); + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45)); + assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); // got a response on the request for `40..50` bc.clear_peer_download(&peer); - bc.insert(40, blocks.to_vec(), peer); + bc.insert(40, blocks.to_vec(), peer.clone()); // request any blocks starting from 1000 or lower. 
let ready = bc.ready_blocks(1000); @@ -423,7 +423,7 @@ mod test { ready, blocks .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) }) .collect::>() ); diff --git a/client/network/sync/src/extra_requests.rs b/client/network/sync/src/extra_requests.rs index 0506bd542ff3b..6206f8a61bcf4 100644 --- a/client/network/sync/src/extra_requests.rs +++ b/client/network/sync/src/extra_requests.rs @@ -446,12 +446,16 @@ mod tests { PeerSyncState::DownloadingJustification(r.0); } - let active = requests.active_requests.iter().map(|(&p, &r)| (p, r)).collect::>(); + let active = requests + .active_requests + .iter() + .map(|(p, &r)| (p.clone(), r)) + .collect::>(); for (peer, req) in &active { assert!(requests.failed_requests.get(req).is_none()); assert!(!requests.pending_requests.contains(req)); - assert!(requests.on_response::<()>(*peer, None).is_none()); + assert!(requests.on_response::<()>(peer.clone(), None).is_none()); assert!(requests.pending_requests.contains(req)); assert_eq!( 1, diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 75ecb9322ca78..2f837cd6c4f51 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -30,20 +30,15 @@ pub mod block_request_handler; pub mod blocks; -pub mod mock; mod schema; -pub mod service; pub mod state; pub mod state_request_handler; -#[cfg(test)] -mod tests; pub mod warp; pub mod warp_request_handler; use crate::{ blocks::BlockCollection, schema::v1::{StateRequest, StateResponse}, - service::chain_sync::{ChainSyncInterfaceHandle, ToServiceCommand}, state::StateSync, warp::{WarpProofImportResult, WarpSync}, }; @@ -55,23 +50,16 @@ use log::{debug, error, info, trace, warn}; use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network_common::{ - config::{ - NonDefaultSetConfig, NonReservedPeerMode, 
NotificationHandshake, ProtocolId, SetConfig, - }, - protocol::role::Roles, - sync::{ - message::{ - BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, - BlockResponse, Direction, FromBlock, - }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, - BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, - OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, - PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, +use sc_network_common::sync::{ + message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, + FromBlock, }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, }; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{ @@ -88,12 +76,10 @@ use sp_runtime::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - iter, ops::Range, pin::Pin, sync::Arc, }; -use warp::TargetBlockImportResult; mod extra_requests; @@ -134,9 +120,6 @@ const MAJOR_SYNC_BLOCKS: u8 = 5; /// Number of peers that need to be connected before warp sync is started. const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; -/// Maximum allowed size for a block announce. -const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; - mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -269,10 +252,6 @@ pub struct ChainSync { import_existing: bool, /// Gap download process. 
gap_sync: Option>, - /// Channel for receiving service commands - service_rx: TracingUnboundedReceiver>, - /// Handle for communicating with `NetworkService` - _network_service: service::network::NetworkServiceHandle, } /// All the data we have about a Peer that we are trying to sync with @@ -336,8 +315,6 @@ pub enum PeerSyncState { DownloadingState, /// Downloading warp proof. DownloadingWarpProof, - /// Downloading warp sync target block. - DownloadingWarpTargetBlock, /// Actively downloading block history after warp sync. DownloadingGap(NumberFor), } @@ -412,21 +389,13 @@ where /// Returns the current sync status. fn status(&self) -> SyncStatus { - let median_seen = self.median_seen(); - let best_seen_block = - median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); - let sync_state = if let Some(target) = median_seen { + let best_seen = self.best_seen(); + let sync_state = if let Some(n) = best_seen { // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing - // if the same can be said about queued blocks. + // more than `MAJOR_SYNC_BLOCKS` behind the best block. let best_block = self.client.info().best_number; - if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { - // If target is not queued, we're downloading, otherwise importing. 
- if target > self.best_queued_number { - SyncState::Downloading { target } - } else { - SyncState::Importing { target } - } + if n > best_block && n - best_block > MAJOR_SYNC_BLOCKS.into() { + SyncState::Downloading } else { SyncState::Idle } @@ -447,7 +416,7 @@ where SyncStatus { state: sync_state, - best_seen_block, + best_seen_block: best_seen, num_peers: self.peers.len() as u32, queued_blocks: self.queue_blocks.len() as u32, state_sync: self.state_sync.as_ref().map(|s| s.progress()), @@ -661,9 +630,9 @@ where .extend(peers); } - fn justification_requests<'a>( - &'a mut self, - ) -> Box)> + 'a> { + fn justification_requests( + &mut self, + ) -> Box)> + '_> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); Box::new(std::iter::from_fn(move || { @@ -678,6 +647,7 @@ where id: 0, fields: BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(request.0), + to: None, direction: Direction::Ascending, max: Some(1), }; @@ -688,14 +658,11 @@ where })) } - fn block_requests<'a>( - &'a mut self, - ) -> Box)> + 'a> { - if self.mode == SyncMode::Warp { - return Box::new(std::iter::once(self.warp_target_block_request()).flatten()) - } - - if self.allowed_requests.is_empty() || self.state_sync.is_some() { + fn block_requests(&mut self) -> Box)> + '_> { + if self.allowed_requests.is_empty() || + self.state_sync.is_some() || + self.mode == SyncMode::Warp + { return Box::new(std::iter::empty()) } @@ -703,7 +670,7 @@ where trace!(target: "sync", "Too many blocks in the queue."); return Box::new(std::iter::empty()) } - let is_major_syncing = self.status().state.is_major_syncing(); + let major_sync = self.status().state == SyncState::Downloading; let attrs = self.required_block_attributes(); let blocks = &mut self.blocks; let fork_targets = &mut self.fork_targets; @@ -713,10 +680,10 @@ where let client = &self.client; let queue = &self.queue_blocks; let allowed_requests = self.allowed_requests.take(); - let max_parallel = if is_major_syncing { 1 } 
else { self.max_parallel_downloads }; + let max_parallel = if major_sync { 1 } else { self.max_parallel_downloads }; let gap_sync = &mut self.gap_sync; - let iter = self.peers.iter_mut().filter_map(move |(&id, peer)| { - if !peer.state.is_available() || !allowed_requests.contains(&id) { + let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { + if !peer.state.is_available() || !allowed_requests.contains(id) { return None } @@ -745,7 +712,7 @@ where }; Some((id, ancestry_request::(current))) } else if let Some((range, req)) = peer_block_request( - &id, + id, peer, blocks, attrs, @@ -764,7 +731,7 @@ where ); Some((id, req)) } else if let Some((hash, req)) = - fork_sync_request(&id, fork_targets, best_queued, last_finalized, attrs, |hash| { + fork_sync_request(id, fork_targets, best_queued, last_finalized, attrs, |hash| { if queue.contains(hash) { BlockStatus::Queued } else { @@ -776,7 +743,7 @@ where Some((id, req)) } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { peer_gap_block_request( - &id, + id, peer, &mut sync.blocks, attrs, @@ -857,7 +824,7 @@ where // Only one pending state request is allowed. 
return None } - if let Some(request) = sync.next_warp_proof_request() { + if let Some(request) = sync.next_warp_poof_request() { let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); if !targets.is_empty() { targets.sort(); @@ -1064,40 +1031,6 @@ where Vec::new() } }, - PeerSyncState::DownloadingWarpTargetBlock => { - peer.state = PeerSyncState::Available; - if let Some(warp_sync) = &mut self.warp_sync { - if blocks.len() == 1 { - validate_blocks::(&blocks, who, Some(request))?; - match warp_sync.import_target_block( - blocks.pop().expect("`blocks` len checked above."), - ) { - TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), - TargetBlockImportResult::BadResponse => - return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), - } - } else if blocks.is_empty() { - debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } else { - debug!( - target: "sync", - "Too many blocks ({}) in warp target block response from {}", - blocks.len(), - who, - ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - } else { - debug!( - target: "sync", - "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", - who, - ); - return Ok(OnBlockData::Continue) - } - }, PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) | PeerSyncState::DownloadingState | @@ -1179,14 +1112,14 @@ where }; match import_result { - state::ImportResult::Import(hash, header, state, body, justifications) => { + state::ImportResult::Import(hash, header, state) => { let origin = BlockOrigin::NetworkInitialSync; let block = IncomingBlock { hash, header: Some(header), - body, + body: None, indexed_body: None, - justifications, + justifications: None, origin: None, allow_missing_state: true, import_existing: true, @@ -1454,7 +1387,8 @@ where if let SyncMode::LightState { skip_proofs, .. 
} = &self.mode { if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { // Finalized a recent block. - let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); + let mut heads: Vec<_> = + self.peers.iter().map(|(_, peer)| peer.best_number).collect(); heads.sort(); let median = heads[heads.len() / 2]; if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { @@ -1465,13 +1399,8 @@ where number, hash, ); - self.state_sync = Some(StateSync::new( - self.client.clone(), - header, - None, - None, - *skip_proofs, - )); + self.state_sync = + Some(StateSync::new(self.client.clone(), header, *skip_proofs)); self.allowed_requests.set_all(); } } @@ -1636,6 +1565,7 @@ where FromBlock::Number(n) => Some(schema::v1::block_request::FromBlock::Number(n.encode())), }, + to_block: request.to.map(|h| h.encode()).unwrap_or_default(), direction: request.direction as i32, max_blocks: request.max.unwrap_or(0), support_multiple_justifications: true, @@ -1741,21 +1671,6 @@ where Ok(OpaqueStateResponse(Box::new(response))) } - - fn poll( - &mut self, - cx: &mut std::task::Context, - ) -> Poll> { - while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { - match event { - ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { - self.set_sync_fork_request(peers, &hash, number); - }, - } - } - - self.poll_block_announce_validation(cx) - } } impl ChainSync @@ -1777,10 +1692,7 @@ where block_announce_validator: Box + Send>, max_parallel_downloads: u32, warp_sync_provider: Option>>, - _network_service: service::network::NetworkServiceHandle, - ) -> Result<(Self, Box>), ClientError> { - let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); - + ) -> Result { let mut sync = Self { client, peers: HashMap::new(), @@ -1802,15 +1714,13 @@ where warp_sync_provider, import_existing: false, gap_sync: None, - service_rx, - _network_service, }; sync.reset_sync_start_point()?; - Ok((sync, 
Box::new(ChainSyncInterfaceHandle::new(tx)))) + Ok(sync) } - /// Returns the median seen block number. - fn median_seen(&self) -> Option> { + /// Returns the best seen block number if we don't have that block yet, `None` otherwise. + fn best_seen(&self) -> Option> { let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); if best_seens.is_empty() { @@ -1819,7 +1729,12 @@ where let middle = best_seens.len() / 2; // Not the "perfect median" when we have an even number of peers. - Some(*best_seens.select_nth_unstable(middle).1) + let median = *best_seens.select_nth_unstable(middle).1; + if median > self.best_queued_number { + Some(median) + } else { + None + } } } @@ -1861,7 +1776,7 @@ where ); } - let origin = if !gap && !self.status().state.is_major_syncing() { + let origin = if !gap && self.status().state != SyncState::Downloading { BlockOrigin::NetworkBroadcast } else { BlockOrigin::NetworkInitialSync @@ -2043,17 +1958,17 @@ where return PollBlockAnnounceValidation::Nothing { is_best, who, announce } }; - if let PeerSyncState::AncestorSearch { .. } = peer.state { - trace!(target: "sync", "Peer state is ancestor search."); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } - } - if is_best { // update their best block peer.best_number = number; peer.best_hash = hash; } + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: "sync", "Peer state is ancestor search."); + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } + } + // If the announced block is the best they have and is not ahead of us, our common number // is either one further ahead or it's the one they just announced, if we know about it. if is_best { @@ -2248,80 +2163,6 @@ where }) .collect() } - - /// Generate block request for downloading of the target block body during warp sync. 
- fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { - if let Some(sync) = &self.warp_sync { - if self.allowed_requests.is_empty() || - sync.is_complete() || - self.peers - .iter() - .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock) - { - // Only one pending warp target block request is allowed. - return None - } - if let Some((target_number, request)) = sync.next_target_block_request() { - // Find a random peer that has a block with the target number. - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= target_number { - trace!(target: "sync", "New warp target block request for {}", id); - peer.state = PeerSyncState::DownloadingWarpTargetBlock; - self.allowed_requests.clear(); - return Some((*id, request)) - } - } - } - } - None - } - - /// Get config for the block announcement protocol - pub fn get_block_announce_proto_config( - &self, - protocol_id: ProtocolId, - fork_id: &Option, - roles: Roles, - best_number: NumberFor, - best_hash: B::Hash, - genesis_hash: B::Hash, - ) -> NonDefaultSetConfig { - let block_announces_protocol = { - let genesis_hash = genesis_hash.as_ref(); - if let Some(ref fork_id) = fork_id { - format!( - "/{}/{}/block-announces/1", - array_bytes::bytes2hex("", genesis_hash), - fork_id - ) - } else { - format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) - } - }; - - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once( - format!("/{}/block-announces/1", protocol_id.as_ref()).into(), - ) - .collect(), - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( - roles, - best_number, - best_hash, - genesis_hash, - ))), - // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement - // protocol is still hardcoded into the peerset. 
- set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } } // This is purely during a backwards compatible transitionary period and should be removed @@ -2341,6 +2182,7 @@ fn ancestry_request(block: NumberFor) -> BlockRequest { id: 0, fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, from: FromBlock::Number(block), + to: None, direction: Direction::Ascending, max: Some(1), } @@ -2456,6 +2298,7 @@ fn peer_block_request( id: 0, fields: attrs, from, + to: None, direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2489,6 +2332,7 @@ fn peer_gap_block_request( id: 0, fields: attrs, from, + to: None, direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2516,7 +2360,7 @@ fn fork_sync_request( true }); for (hash, r) in targets { - if !r.peers.contains(&id) { + if !r.peers.contains(id) { continue } // Download the fork only if it is behind or not too far ahead our tip of the chain @@ -2538,6 +2382,7 @@ fn fork_sync_request( id: 0, fields: attributes, from: FromBlock::Hash(*hash), + to: None, direction: Direction::Descending, max: Some(count), }, @@ -2674,7 +2519,6 @@ fn validate_blocks( #[cfg(test)] mod test { use super::*; - use crate::service::network::NetworkServiceProvider; use futures::{executor::block_on, future::poll_fn}; use sc_block_builder::BlockBuilderProvider; use sc_network_common::sync::message::{BlockData, BlockState, FromBlock}; @@ -2696,17 +2540,9 @@ mod test { let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let (mut sync, _) = ChainSync::new( - SyncMode::Full, - client.clone(), - block_announce_validator, - 1, - None, - chain_sync_network_handle, - ) - .unwrap(); + let mut sync = + 
ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1, None) + .unwrap(); let (a1_hash, a1_number) = { let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -2752,16 +2588,12 @@ mod test { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, None, - chain_sync_network_handle, ) .unwrap(); @@ -2782,15 +2614,15 @@ mod test { let (b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); + sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap(); + sync.new_peer(peer_id2.clone(), Hash::random(), 10).unwrap(); // we wil send block requests to these peers // for these blocks we don't know about - assert!(sync.block_requests().all(|(p, _)| { p == peer_id1 || p == peer_id2 })); + assert!(sync.block_requests().all(|(p, _)| { *p == peer_id1 || *p == peer_id2 })); // add a new peer at a known block - sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); + sync.new_peer(peer_id3.clone(), b1_hash, b1_number).unwrap(); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -2800,7 +2632,8 @@ mod test { assert!(sync.justification_requests().any(|(p, r)| { p == peer_id3 && r.fields == BlockAttributes::JUSTIFICATION && - r.from == FromBlock::Hash(b1_hash) + r.from == FromBlock::Hash(b1_hash) && + r.to == None })); assert_eq!( @@ -2840,7 +2673,7 @@ mod test { data: Some(Vec::new()), }; - sync.push_block_announce_validation(*peer_id, header.hash(), block_annnounce, true); + 
sync.push_block_announce_validation(peer_id.clone(), header.hash(), block_annnounce, true); // Poll until we have procssed the block announcement block_on(poll_fn(|cx| loop { @@ -2882,7 +2715,7 @@ mod test { log::trace!(target: "sync", "Requests: {:?}", requests); assert_eq!(1, requests.len()); - assert_eq!(*peer, requests[0].0); + assert_eq!(peer, requests[0].0); let request = requests[0].1.clone(); @@ -2922,16 +2755,13 @@ mod test { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, None, - chain_sync_network_handle, ) .unwrap(); @@ -2960,8 +2790,8 @@ mod test { let block3_fork = build_block_at(block2.hash(), false); // Add two peers which are on block 1. - sync.new_peer(peer_id1, block1.hash(), 1).unwrap(); - sync.new_peer(peer_id2, block1.hash(), 1).unwrap(); + sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap(); // Tell sync that our best block is 3. 
sync.update_chain_info(&block3.hash(), 3); @@ -3039,17 +2869,14 @@ mod test { }; let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); let info = client.info(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, None, - chain_sync_network_handle, ) .unwrap(); @@ -3058,9 +2885,9 @@ mod test { let best_block = blocks.last().unwrap().clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) + sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) .unwrap(); - sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); + sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap(); let mut best_block_num = 0; while best_block_num < MAX_DOWNLOAD_AHEAD { @@ -3095,9 +2922,9 @@ mod test { .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), + b.header().number().clone(), Default::default(), - Some(peer_id1), + Some(peer_id1.clone()), )), b.hash(), ) @@ -3118,9 +2945,9 @@ mod test { send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); let (peer1_req, peer2_req) = sync.block_requests().fold((None, None), |res, req| { - if req.0 == peer_id1 { + if req.0 == &peer_id1 { (Some(req.1), res.1) - } else if req.0 == peer_id2 { + } else if req.0 == &peer_id2 { (res.0, Some(req.1)) } else { panic!("Unexpected req: {:?}", req) @@ -3163,8 +2990,6 @@ mod test { fn can_sync_huge_fork() { sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) .map(|_| build_block(&mut client, None, false)) @@ -3189,26 +3014,27 @@ mod test { let info = client.info(); - let (mut sync, _) = 
ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, None, - chain_sync_network_handle, ) .unwrap(); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); + client + .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) + .unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) .unwrap(); send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); @@ -3233,7 +3059,7 @@ mod test { } // Now request and import the fork. - let mut best_block_num = *finalized_block.header().number() as u32; + let mut best_block_num = finalized_block.header().number().clone() as u32; while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { let request = get_block_request( &mut sync, @@ -3266,9 +3092,9 @@ mod test { .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), + b.header().number().clone(), Default::default(), - Some(peer_id1), + Some(peer_id1.clone()), )), b.hash(), ) @@ -3295,8 +3121,6 @@ mod test { fn syncs_fork_without_duplicate_requests() { sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) .map(|_| build_block(&mut client, None, false)) @@ -3321,26 +3145,27 @@ mod test { let info = client.info(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( 
SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, None, - chain_sync_network_handle, ) .unwrap(); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); + client + .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) + .unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) .unwrap(); send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); @@ -3365,7 +3190,7 @@ mod test { } // Now request and import the fork. - let mut best_block_num = *finalized_block.header().number() as u32; + let mut best_block_num = finalized_block.header().number().clone() as u32; let mut request = get_block_request( &mut sync, FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), @@ -3406,9 +3231,9 @@ mod test { .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), + b.header().number().clone(), Default::default(), - Some(peer_id1), + Some(peer_id1.clone()), )), b.hash(), ) @@ -3448,25 +3273,22 @@ mod test { #[test] fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, None, - chain_sync_network_handle, ) .unwrap(); let 
peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) .unwrap(); // Create a "new" header and announce it @@ -3482,26 +3304,23 @@ mod test { #[test] fn can_import_response_with_missing_blocks() { sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); let mut client2 = Arc::new(TestClientBuilder::new().build()); let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); let empty_client = Arc::new(TestClientBuilder::new().build()); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, empty_client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, None, - chain_sync_network_handle, ) .unwrap(); let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) + sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) .unwrap(); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs deleted file mode 100644 index fbb54bd5e998d..0000000000000 --- a/client/network/sync/src/mock.rs +++ /dev/null @@ -1,122 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Contains a mock implementation of `ChainSync` that can be used -//! for testing calls made to `ChainSync`. - -use futures::task::Poll; -use libp2p::PeerId; -use sc_consensus::{BlockImportError, BlockImportStatus}; -use sc_network_common::sync::{ - message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, - warp::{EncodedProof, WarpProofRequest}, - BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, - OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, - PollBlockAnnounceValidation, SyncStatus, -}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; - -mockall::mock! 
{ - pub ChainSync {} - - impl ChainSyncT for ChainSync { - fn peer_info(&self, who: &PeerId) -> Option>; - fn status(&self) -> SyncStatus; - fn num_sync_requests(&self) -> usize; - fn num_downloaded_blocks(&self) -> usize; - fn num_peers(&self) -> usize; - fn new_peer( - &mut self, - who: PeerId, - best_hash: Block::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer>; - fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); - fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); - fn clear_justification_requests(&mut self); - fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &Block::Hash, - number: NumberFor, - ); - fn justification_requests<'a>( - &'a mut self, - ) -> Box)> + 'a>; - fn block_requests<'a>(&'a mut self) -> Box)> + 'a>; - fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>; - fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)>; - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer>; - fn on_state_data( - &mut self, - who: &PeerId, - response: OpaqueStateResponse, - ) -> Result, BadPeer>; - fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer>; - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer>; - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, Block::Hash)>, - ) -> Box), BadPeer>>>; - fn on_justification_import( - &mut self, - hash: Block::Hash, - number: NumberFor, - success: bool, - ); - fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - fn push_block_announce_validation( - &mut self, - who: PeerId, - hash: Block::Hash, - announce: BlockAnnounce, - is_best: bool, - ); - fn poll_block_announce_validation<'a>( - &mut self, - cx: &mut std::task::Context<'a>, - ) -> Poll>; - fn peer_disconnected(&mut self, 
who: &PeerId) -> Option>; - fn metrics(&self) -> Metrics; - fn create_opaque_block_request(&self, request: &BlockRequest) -> OpaqueBlockRequest; - fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result, String>; - fn decode_block_response(&self, response: &[u8]) -> Result; - fn block_response_into_blocks( - &self, - request: &BlockRequest, - response: OpaqueBlockResponse, - ) -> Result>, String>; - fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String>; - fn decode_state_response(&self, response: &[u8]) -> Result; - fn poll<'a>( - &mut self, - cx: &mut std::task::Context<'a>, - ) -> Poll>; - } -} diff --git a/client/network/sync/src/schema/api.v1.proto b/client/network/sync/src/schema/api.v1.proto index 1490f61a41ddd..b51137d1d51d4 100644 --- a/client/network/sync/src/schema/api.v1.proto +++ b/client/network/sync/src/schema/api.v1.proto @@ -23,8 +23,9 @@ message BlockRequest { // Start with given block number. bytes number = 3; } + // End at this block. An implementation defined maximum is used when unspecified. + bytes to_block = 4; // optional // Sequence direction. - // If missing, should be interpreted as "Ascending". Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. uint32 max_blocks = 6; // optional diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs deleted file mode 100644 index cf07c65ee3109..0000000000000 --- a/client/network/sync/src/service/chain_sync.rs +++ /dev/null @@ -1,58 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use libp2p::PeerId; -use sc_network_common::service::NetworkSyncForkRequest; -use sc_utils::mpsc::TracingUnboundedSender; -use sp_runtime::traits::{Block as BlockT, NumberFor}; - -/// Commands send to `ChainSync` -#[derive(Debug)] -pub enum ToServiceCommand { - SetSyncForkRequest(Vec, B::Hash, NumberFor), -} - -/// Handle for communicating with `ChainSync` asynchronously -pub struct ChainSyncInterfaceHandle { - tx: TracingUnboundedSender>, -} - -impl ChainSyncInterfaceHandle { - /// Create new handle - pub fn new(tx: TracingUnboundedSender>) -> Self { - Self { tx } - } -} - -impl NetworkSyncForkRequest> - for ChainSyncInterfaceHandle -{ - /// Configure an explicit fork sync request. - /// - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// - /// Passing empty `peers` set effectively removes the sync request. 
- fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self - .tx - .unbounded_send(ToServiceCommand::SetSyncForkRequest(peers, hash, number)); - } -} diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs deleted file mode 100644 index c146e1ec07b48..0000000000000 --- a/client/network/sync/src/service/mock.rs +++ /dev/null @@ -1,75 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use sc_network_common::service::{NetworkPeers, NetworkSyncForkRequest}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; - -pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; -use libp2p::{Multiaddr, PeerId}; -use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName}; -use sc_peerset::ReputationChange; -use std::collections::HashSet; - -mockall::mock! { - pub ChainSyncInterface {} - - impl NetworkSyncForkRequest> - for ChainSyncInterface - { - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor); - } -} - -mockall::mock! { - pub NetworkServiceHandle {} -} - -// Mocked `Network` for `ChainSync`-related tests -mockall::mock! 
{ - pub Network {} - - impl NetworkPeers for Network { - fn set_authorized_peers(&self, peers: HashSet); - fn set_authorized_only(&self, reserved_only: bool); - fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); - fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); - fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); - fn accept_unreserved_peers(&self); - fn deny_unreserved_peers(&self); - fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>; - fn remove_reserved_peer(&self, peer_id: PeerId); - fn set_reserved_peers( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String>; - fn add_peers_to_reserved_set( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String>; - fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec); - fn add_to_peers_set( - &self, - protocol: ProtocolName, - peers: HashSet, - ) -> Result<(), String>; - fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec); - fn sync_num_connected(&self) -> usize; - } -} diff --git a/client/network/sync/src/service/mod.rs b/client/network/sync/src/service/mod.rs deleted file mode 100644 index 692aa26985458..0000000000000 --- a/client/network/sync/src/service/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! `ChainSync`-related service code - -pub mod chain_sync; -pub mod mock; -pub mod network; diff --git a/client/network/sync/src/service/network.rs b/client/network/sync/src/service/network.rs deleted file mode 100644 index 44ed177661264..0000000000000 --- a/client/network/sync/src/service/network.rs +++ /dev/null @@ -1,128 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use futures::StreamExt; -use libp2p::PeerId; -use sc_network_common::{protocol::ProtocolName, service::NetworkPeers}; -use sc_peerset::ReputationChange; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use std::sync::Arc; - -/// Network-related services required by `sc-network-sync` -pub trait Network: NetworkPeers {} - -impl Network for T where T: NetworkPeers {} - -/// Network service provider for `ChainSync` -/// -/// It runs as an asynchronous task and listens to commands coming from `ChainSync` and -/// calls the `NetworkService` on its behalf. 
-pub struct NetworkServiceProvider { - rx: TracingUnboundedReceiver, -} - -/// Commands that `ChainSync` wishes to send to `NetworkService` -pub enum ToServiceCommand { - /// Call `NetworkPeers::disconnect_peer()` - DisconnectPeer(PeerId, ProtocolName), - - /// Call `NetworkPeers::report_peer()` - ReportPeer(PeerId, ReputationChange), -} - -/// Handle that is (temporarily) passed to `ChainSync` so it can -/// communicate with `NetworkService` through `SyncingEngine` -#[derive(Clone)] -pub struct NetworkServiceHandle { - tx: TracingUnboundedSender, -} - -impl NetworkServiceHandle { - /// Create new service handle - pub fn new(tx: TracingUnboundedSender) -> NetworkServiceHandle { - Self { tx } - } - - /// Report peer - pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - let _ = self.tx.unbounded_send(ToServiceCommand::ReportPeer(who, cost_benefit)); - } - - /// Disconnect peer - pub fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let _ = self.tx.unbounded_send(ToServiceCommand::DisconnectPeer(who, protocol)); - } -} - -impl NetworkServiceProvider { - /// Create new `NetworkServiceProvider` - pub fn new() -> (Self, NetworkServiceHandle) { - let (tx, rx) = tracing_unbounded("mpsc_network_service_provider"); - - (Self { rx }, NetworkServiceHandle::new(tx)) - } - - /// Run the `NetworkServiceProvider` - pub async fn run(mut self, service: Arc) { - while let Some(inner) = self.rx.next().await { - match inner { - ToServiceCommand::DisconnectPeer(peer, protocol_name) => - service.disconnect_peer(peer, protocol_name), - ToServiceCommand::ReportPeer(peer, reputation_change) => - service.report_peer(peer, reputation_change), - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::service::mock::MockNetwork; - - // typical pattern in `Protocol` code where peer is disconnected - // and then reported - #[async_std::test] - async fn disconnect_and_report_peer() { - let (provider, handle) = NetworkServiceProvider::new(); - 
- let peer = PeerId::random(); - let proto = ProtocolName::from("test-protocol"); - let proto_clone = proto.clone(); - let change = sc_peerset::ReputationChange::new_fatal("test-change"); - - let mut mock_network = MockNetwork::new(); - mock_network - .expect_disconnect_peer() - .withf(move |in_peer, in_proto| &peer == in_peer && &proto == in_proto) - .once() - .returning(|_, _| ()); - mock_network - .expect_report_peer() - .withf(move |in_peer, in_change| &peer == in_peer && &change == in_change) - .once() - .returning(|_, _| ()); - - async_std::task::spawn(async move { - provider.run(Arc::new(mock_network)).await; - }); - - handle.disconnect_peer(peer, proto_clone); - handle.report_peer(peer, change); - } -} diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index 9f64b52334c8a..e70d3b6b33a28 100644 --- a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -26,10 +26,7 @@ use sc_consensus::ImportedState; use sc_network_common::sync::StateDownloadProgress; use smallvec::SmallVec; use sp_core::storage::well_known_keys; -use sp_runtime::{ - traits::{Block as BlockT, Header, NumberFor}, - Justifications, -}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use std::{collections::HashMap, sync::Arc}; /// State sync state machine. Accumulates partial state data until it @@ -38,8 +35,6 @@ pub struct StateSync { target_block: B::Hash, target_header: B::Header, target_root: B::Hash, - target_body: Option>, - target_justifications: Option, last_key: SmallVec<[Vec; 2]>, state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, @@ -51,7 +46,7 @@ pub struct StateSync { /// Import state chunk result. pub enum ImportResult { /// State is complete and ready for import. - Import(B::Hash, B::Header, ImportedState, Option>, Option), + Import(B::Hash, B::Header, ImportedState), /// Continue downloading. Continue, /// Bad state chunk. 
@@ -64,20 +59,12 @@ where Client: ProofProvider + Send + Sync + 'static, { /// Create a new instance. - pub fn new( - client: Arc, - target_header: B::Header, - target_body: Option>, - target_justifications: Option, - skip_proof: bool, - ) -> Self { + pub fn new(client: Arc, target: B::Header, skip_proof: bool) -> Self { Self { client, - target_block: target_header.hash(), - target_root: *target_header.state_root(), - target_header, - target_body, - target_justifications, + target_block: target.hash(), + target_root: *target.state_root(), + target_header: target, last_key: SmallVec::default(), state: HashMap::default(), complete: false, @@ -226,8 +213,6 @@ where block: self.target_block, state: std::mem::take(&mut self.state).into(), }, - self.target_body.clone(), - self.target_justifications.clone(), ) } else { ImportResult::Continue diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 98f1ed34d0581..8e0bae14046da 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -27,15 +27,14 @@ use libp2p::PeerId; use log::{debug, trace}; use lru::LruCache; use prost::Message; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::ProofProvider; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, }; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{ hash::{Hash, Hasher}, - num::NonZeroUsize, sync::Arc, time::Duration, }; @@ -51,16 +50,10 @@ mod rep { pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); } -/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests. 
-pub fn generate_protocol_config>( - protocol_id: &ProtocolId, - genesis_hash: Hash, - fork_id: Option<&str>, -) -> ProtocolConfig { +/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(genesis_hash, fork_id).into(), - fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) - .collect(), + name: generate_protocol_name(protocol_id).into(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(40), @@ -68,18 +61,8 @@ pub fn generate_protocol_config>( } } -/// Generate the state protocol name from the genesis hash and fork id. -fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!("/{}/{}/state/2", array_bytes::bytes2hex("", genesis_hash), fork_id) - } else { - format!("/{}/state/2", array_bytes::bytes2hex("", genesis_hash)) - } -} - -/// Generate the legacy state protocol name from chain specific protocol identifier. -fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the state protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/state/2", protocol_id.as_ref()) } @@ -121,12 +104,11 @@ pub struct StateRequestHandler { impl StateRequestHandler where B: BlockT, - Client: BlockBackend + ProofProvider + Send + Sync + 'static, + Client: ProofProvider + Send + Sync + 'static, { /// Create a new [`StateRequestHandler`]. pub fn new( protocol_id: &ProtocolId, - fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -134,20 +116,10 @@ where // number of peers. 
let (tx, request_receiver) = mpsc::channel(num_peer_hint); - let mut protocol_config = generate_protocol_config( - protocol_id, - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - fork_id, - ); + let mut protocol_config = generate_protocol_config(protocol_id); protocol_config.inbound_queue = Some(tx); - let capacity = - NonZeroUsize::new(num_peer_hint.max(1) * 2).expect("cache capacity is not zero"); - let seen_requests = LruCache::new(capacity); + let seen_requests = LruCache::new(num_peer_hint * 2); (Self { client, request_receiver, seen_requests }, protocol_config) } @@ -208,14 +180,14 @@ where if !request.no_proof { let (proof, _count) = self.client.read_proof_collection( - block, + &BlockId::hash(block), request.start.as_slice(), MAX_RESPONSE_BYTES, )?; response.proof = proof.encode(); } else { let entries = self.client.storage_collection( - block, + &BlockId::hash(block), request.start.as_slice(), MAX_RESPONSE_BYTES, )?; diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs deleted file mode 100644 index 479c78bfdea97..0000000000000 --- a/client/network/sync/src/tests.rs +++ /dev/null @@ -1,61 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use crate::{service::network::NetworkServiceProvider, ChainSync, ForkTarget}; - -use libp2p::PeerId; -use sc_network_common::{service::NetworkSyncForkRequest, sync::ChainSync as ChainSyncT}; -use sp_consensus::block_validation::DefaultBlockAnnounceValidator; -use sp_core::H256; -use std::{sync::Arc, task::Poll}; -use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; - -// verify that the fork target map is empty, then submit a new sync fork request, -// poll `ChainSync` and verify that a new sync fork request has been registered -#[async_std::test] -async fn delegate_to_chainsync() { - let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut chain_sync, chain_sync_service) = ChainSync::new( - sc_network_common::sync::SyncMode::Full, - Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), - Box::new(DefaultBlockAnnounceValidator), - 1u32, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let hash = H256::random(); - let in_number = 1337u64; - let peers = (0..3).map(|_| PeerId::random()).collect::>(); - - assert!(chain_sync.fork_targets.is_empty()); - chain_sync_service.set_sync_fork_request(peers, hash, in_number); - - futures::future::poll_fn(|cx| { - let _ = chain_sync.poll(cx); - Poll::Ready(()) - }) - .await; - - if let Some(ForkTarget { number, .. 
}) = chain_sync.fork_targets.get(&hash) { - assert_eq!(number, &in_number); - } else { - panic!("expected to contain `ForkTarget`"); - } -} diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index ab8a7c66b9856..f3fad6c1b7fdb 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -23,21 +23,17 @@ use crate::{ state::{ImportResult, StateSync}, }; use sc_client_api::ProofProvider; -use sc_network_common::sync::{ - message::{BlockAttributes, BlockData, BlockRequest, Direction, FromBlock}, - warp::{ - EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, - WarpSyncProvider, - }, +use sc_network_common::sync::warp::{ + EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, + WarpSyncProvider, }; use sp_blockchain::HeaderBackend; use sp_finality_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::sync::Arc; enum Phase { WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash }, - TargetBlock(B::Header), State(StateSync), } @@ -49,14 +45,6 @@ pub enum WarpProofImportResult { BadResponse, } -/// Import target block result. -pub enum TargetBlockImportResult { - /// Import was successful. - Success, - /// Invalid block. - BadResponse, -} - /// Warp sync state machine. Accumulates warp proofs and state. pub struct WarpSync { phase: Phase, @@ -84,7 +72,7 @@ where /// Validate and import a state response. pub fn import_state(&mut self, response: StateResponse) -> ImportResult { match &mut self.phase { - Phase::WarpProof { .. } | Phase::TargetBlock(_) => { + Phase::WarpProof { .. } => { log::debug!(target: "sync", "Unexpected state response"); ImportResult::BadResponse }, @@ -95,7 +83,7 @@ where /// Validate and import a warp proof response. 
pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult { match &mut self.phase { - Phase::State(_) | Phase::TargetBlock(_) => { + Phase::State(_) => { log::debug!(target: "sync", "Unexpected warp proof response"); WarpProofImportResult::BadResponse }, @@ -116,7 +104,8 @@ where Ok(VerificationResult::Complete(new_set_id, _, header)) => { log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id); self.total_proof_bytes += response.0.len() as u64; - self.phase = Phase::TargetBlock(header); + let state_sync = StateSync::new(self.client.clone(), header, false); + self.phase = Phase::State(state_sync); WarpProofImportResult::Success }, } @@ -124,99 +113,35 @@ where } } - /// Import the target block body. - pub fn import_target_block(&mut self, block: BlockData) -> TargetBlockImportResult { - match &mut self.phase { - Phase::WarpProof { .. } | Phase::State(_) => { - log::debug!(target: "sync", "Unexpected target block response"); - TargetBlockImportResult::BadResponse - }, - Phase::TargetBlock(header) => - if let Some(block_header) = &block.header { - if block_header == header { - if block.body.is_some() { - let state_sync = StateSync::new( - self.client.clone(), - header.clone(), - block.body, - block.justifications, - false, - ); - self.phase = Phase::State(state_sync); - TargetBlockImportResult::Success - } else { - log::debug!( - target: "sync", - "Importing target block failed: missing body.", - ); - TargetBlockImportResult::BadResponse - } - } else { - log::debug!( - target: "sync", - "Importing target block failed: different header.", - ); - TargetBlockImportResult::BadResponse - } - } else { - log::debug!(target: "sync", "Importing target block failed: missing header."); - TargetBlockImportResult::BadResponse - }, - } - } - /// Produce next state request. pub fn next_state_request(&self) -> Option { match &self.phase { Phase::WarpProof { .. 
} => None, - Phase::TargetBlock(_) => None, Phase::State(sync) => Some(sync.next_request()), } } /// Produce next warp proof request. - pub fn next_warp_proof_request(&self) -> Option> { + pub fn next_warp_poof_request(&self) -> Option> { match &self.phase { - Phase::WarpProof { last_hash, .. } => Some(WarpProofRequest { begin: *last_hash }), - Phase::TargetBlock(_) => None, - Phase::State(_) => None, - } - } - - /// Produce next target block request. - pub fn next_target_block_request(&self) -> Option<(NumberFor, BlockRequest)> { - match &self.phase { - Phase::WarpProof { .. } => None, - Phase::TargetBlock(header) => { - let request = BlockRequest:: { - id: 0, - fields: BlockAttributes::HEADER | - BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, - from: FromBlock::Hash(header.hash()), - direction: Direction::Ascending, - max: Some(1), - }; - Some((*header.number(), request)) - }, Phase::State(_) => None, + Phase::WarpProof { last_hash, .. } => Some(WarpProofRequest { begin: *last_hash }), } } /// Return target block hash if it is known. pub fn target_block_hash(&self) -> Option { match &self.phase { - Phase::WarpProof { .. } => None, - Phase::TargetBlock(_) => None, Phase::State(s) => Some(s.target()), + Phase::WarpProof { .. } => None, } } /// Return target block number if it is known. pub fn target_block_number(&self) -> Option> { match &self.phase { - Phase::WarpProof { .. } => None, - Phase::TargetBlock(header) => Some(*header.number()), Phase::State(s) => Some(s.target_block_num()), + Phase::WarpProof { .. } => None, } } @@ -224,7 +149,6 @@ where pub fn is_complete(&self) -> bool { match &self.phase { Phase::WarpProof { .. 
} => false, - Phase::TargetBlock(_) => false, Phase::State(sync) => sync.is_complete(), } } @@ -236,10 +160,6 @@ where phase: WarpSyncPhase::DownloadingWarpProofs, total_bytes: self.total_proof_bytes, }, - Phase::TargetBlock(_) => WarpSyncProgress { - phase: WarpSyncPhase::DownloadingTargetBlock, - total_bytes: self.total_proof_bytes, - }, Phase::State(sync) => WarpSyncProgress { phase: if self.is_complete() { WarpSyncPhase::ImportingState diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs index e675bf45cad91..53ec216a1e668 100644 --- a/client/network/sync/src/warp_request_handler.rs +++ b/client/network/sync/src/warp_request_handler.rs @@ -36,15 +36,9 @@ const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing /// incoming requests. -pub fn generate_request_response_config>( - protocol_id: ProtocolId, - genesis_hash: Hash, - fork_id: Option<&str>, -) -> RequestResponseConfig { +pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { RequestResponseConfig { - name: generate_protocol_name(genesis_hash, fork_id).into(), - fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) - .collect(), + name: generate_protocol_name(protocol_id).into(), max_request_size: 32, max_response_size: MAX_RESPONSE_SIZE, request_timeout: Duration::from_secs(10), @@ -52,18 +46,8 @@ pub fn generate_request_response_config>( } } -/// Generate the grandpa warp sync protocol name from the genesi hash and fork id. 
-fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!("/{}/{}/sync/warp", array_bytes::bytes2hex("", genesis_hash), fork_id) - } else { - format!("/{}/sync/warp", array_bytes::bytes2hex("", genesis_hash)) - } -} - -/// Generate the legacy grandpa warp sync protocol name from chain specific protocol identifier. -fn generate_legacy_protocol_name(protocol_id: ProtocolId) -> String { +/// Generate the grandpa warp sync protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: ProtocolId) -> String { format!("/{}/sync/warp", protocol_id.as_ref()) } @@ -75,16 +59,13 @@ pub struct RequestHandler { impl RequestHandler { /// Create a new [`RequestHandler`]. - pub fn new>( + pub fn new( protocol_id: ProtocolId, - genesis_hash: Hash, - fork_id: Option<&str>, backend: Arc>, ) -> (Self, RequestResponseConfig) { let (tx, request_receiver) = mpsc::channel(20); - let mut request_response_config = - generate_request_response_config(protocol_id, genesis_hash, fork_id); + let mut request_response_config = generate_request_response_config(protocol_id); request_response_config.inbound_queue = Some(tx); (Self { backend, request_receiver }, request_response_config) diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 30a57bc1b5171..1aa6ebd8bf357 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -14,12 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-std = "1.11.0" -async-trait = "0.1.57" +async-trait = "0.1.50" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.49.0", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -parking_lot = "0.12.1" +parking_lot = "0.12.0" rand = "0.7.2" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-client-api = { version = 
"4.0.0-dev", path = "../../api" } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index b86f6787f30b5..a2bd5276c31d6 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -40,7 +40,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); - let justifications = client.justifications(hash).unwrap(); + let justifications = client.justifications(&BlockId::Number(1)).unwrap(); let peer_id = PeerId::random(); ( client, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 035fc0a972a59..4659684987f77 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -23,6 +23,7 @@ mod block_import; mod sync; use std::{ + borrow::Cow, collections::HashMap, marker::PhantomData, pin::Pin, @@ -44,40 +45,39 @@ use sc_client_api::{ }; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, - ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, - Verifier, + ForkChoiceStrategy, ImportResult, JustificationImport, LongestChain, Verifier, }; +pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, - Multiaddr, NetworkService, NetworkWorker, -}; -use sc_network_common::{ config::{ - MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, + MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, + SyncMode, TransportConfig, }, - protocol::{role::Roles, ProtocolName}, - service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, - sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, + Multiaddr, NetworkService, 
NetworkWorker, +}; +pub use sc_network_common::config::ProtocolId; +use sc_network_common::sync::warp::{ + AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, - state_request_handler::StateRequestHandler, warp_request_handler, ChainSync, + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + warp_request_handler, ChainSync, }; use sc_service::client::Client; use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, - Backend as BlockchainBackend, HeaderBackend, Info as BlockchainInfo, Result as ClientResult, + HeaderBackend, Info as BlockchainInfo, Result as ClientResult, }; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, - BlockOrigin, Error as ConsensusError, SyncOracle, + BlockOrigin, Error as ConsensusError, }; use sp_core::H256; use sp_runtime::{ codec::{Decode, Encode}, generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justification, Justifications, }; use substrate_test_runtime_client::AccountKeyring; @@ -173,14 +173,11 @@ impl PeersClient { Some(header) => header, None => return false, }; - self.backend.have_state_at(header.hash(), *header.number()) + self.backend.have_state_at(&header.hash(), *header.number()) } - pub fn justifications( - &self, - hash: ::Hash, - ) -> ClientResult> { - self.client.justifications(hash) + pub fn justifications(&self, block: &BlockId) -> ClientResult> { + self.client.justifications(block) } pub fn finality_notification_stream(&self) -> FinalityNotifications { @@ -193,11 +190,11 @@ impl PeersClient { pub fn finalize_block( &self, - hash: ::Hash, + id: BlockId, justification: Option, notify: bool, ) -> 
ClientResult<()> { - self.client.finalize_block(hash, justification, notify) + self.client.finalize_block(id, justification, notify) } } @@ -246,7 +243,7 @@ where { /// Get this peer ID. pub fn id(&self) -> PeerId { - self.network.service().local_peer_id() + *self.network.service().local_peer_id() } /// Returns true if we're major syncing. @@ -535,17 +532,10 @@ where self.verifier.failed_verifications.lock().clone() } - pub fn has_block(&self, hash: H256) -> bool { - self.backend - .as_ref() - .map(|backend| backend.blockchain().header(BlockId::hash(hash)).unwrap().is_some()) - .unwrap_or(false) - } - - pub fn has_body(&self, hash: H256) -> bool { + pub fn has_block(&self, hash: &H256) -> bool { self.backend .as_ref() - .map(|backend| backend.blockchain().body(hash).unwrap().is_some()) + .map(|backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some()) .unwrap_or(false) } } @@ -686,13 +676,11 @@ pub struct FullPeerConfig { /// Pruning window size. /// /// NOTE: only finalized blocks are subject for removal! - pub blocks_pruning: Option, + pub keep_blocks: Option, /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. - pub notifications_protocols: Vec, - /// List of request-response protocols that the network must support. - pub request_response_protocols: Vec, + pub notifications_protocols: Vec>, /// The indices of the peers the peer should be connected to. /// /// If `None`, it will be connected to all other peers. @@ -754,10 +742,10 @@ where /// Add a full peer. 
fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { - let mut test_client_builder = match (config.blocks_pruning, config.storage_chain) { - (Some(blocks_pruning), true) => TestClientBuilder::with_tx_storage(blocks_pruning), + let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { + (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), - (Some(blocks_pruning), false) => TestClientBuilder::with_pruning_window(blocks_pruning), + (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), (None, false) => TestClientBuilder::with_default_backend(), }; if let Some(storage) = config.extra_storage { @@ -795,9 +783,6 @@ where network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; - network_config - .request_response_protocols - .extend(config.request_response_protocols); network_config.extra_sets = config .notifications_protocols .into_iter() @@ -805,7 +790,6 @@ where notifications_protocol: p, fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - handshake: None, set_config: Default::default(), }) .collect(); @@ -813,7 +797,7 @@ where let addrs = connect_to .iter() .map(|v| { - let peer_id = self.peer(*v).network_service().local_peer_id(); + let peer_id = *self.peer(*v).network_service().local_peer_id(); let multiaddr = self.peer(*v).listen_addr.clone(); MultiaddrWithPeerId { peer_id, multiaddr } }) @@ -824,25 +808,23 @@ where let protocol_id = ProtocolId::from("test-protocol-name"); - let fork_id = Some(String::from("test-fork-id")); - let block_request_protocol_config = { let (handler, protocol_config) = - BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); + BlockRequestHandler::new(&protocol_id, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let 
state_request_protocol_config = { let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, None, client.clone(), 50); + StateRequestHandler::new(&protocol_id, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = { let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, None, client.clone()); + LightClientRequestHandler::new(&protocol_id, client.clone()); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -850,16 +832,8 @@ where let warp_sync = Arc::new(TestWarpSyncProvider(client.clone())); let warp_protocol_config = { - let (handler, protocol_config) = warp_request_handler::RequestHandler::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - None, - warp_sync.clone(), - ); + let (handler, protocol_config) = + warp_request_handler::RequestHandler::new(protocol_id.clone(), warp_sync.clone()); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -867,9 +841,7 @@ where let block_announce_validator = config .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); - let (chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let (chain_sync, chain_sync_service) = ChainSync::new( + let chain_sync = ChainSync::new( match network_config.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, SyncMode::Fast { skip_proofs, storage_chain_mode } => @@ -883,53 +855,34 @@ where block_announce_validator, network_config.max_parallel_downloads, Some(warp_sync), - chain_sync_network_handle, ) .unwrap(); - let block_announce_config = chain_sync.get_block_announce_proto_config( - protocol_id.clone(), - &fork_id, - Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - 
.flatten() - .expect("Genesis block exists; qed"), - ); - let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, + transactions_handler_executor: Box::new(|task| { + async_std::task::spawn(task); + }), network_config, chain: client.clone(), + transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, - fork_id, import_queue, chain_sync: Box::new(chain_sync), - chain_sync_service, metrics_registry: None, - block_announce_config, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, warp_sync_protocol_config: Some(warp_protocol_config), - request_response_protocol_configs: Vec::new(), }) .unwrap(); trace!(target: "test_network", "Peer identifier: {}", network.service().local_peer_id()); - let service = network.service().clone(); - async_std::task::spawn(async move { - chain_sync_network_provider.run(service).await; - }); - self.mut_peers(move |peers| { for peer in peers.iter_mut() { peer.network - .add_known_address(network.service().local_peer_id(), listen_addr.clone()); + .add_known_address(*network.service().local_peer_id(), listen_addr.clone()); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); @@ -1124,7 +1077,7 @@ impl JustificationImport for ForceFinalized { justification: Justification, ) -> Result<(), Self::Error> { self.0 - .finalize_block(hash, Some(justification), true) + .finalize_block(BlockId::Hash(hash), Some(justification), true) .map_err(|_| ConsensusError::InvalidJustification) } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 4515677d0b1e0..84a5c2ca13fa5 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -246,40 +246,44 @@ fn sync_justifications() { net.peer(0).push_blocks(20, false); net.block_until_sync(); - let backend = net.peer(0).client().as_backend(); - let hashof10 = 
backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); - let hashof15 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(15)).unwrap(); - let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); - // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(hashof10).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); // we finalize block #10, #15 and #20 for peer 0 with a justification let just = (*b"FRNK", Vec::new()); - net.peer(0).client().finalize_block(hashof10, Some(just.clone()), true).unwrap(); - net.peer(0).client().finalize_block(hashof15, Some(just.clone()), true).unwrap(); - net.peer(0).client().finalize_block(hashof20, Some(just.clone()), true).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some(just.clone()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(15), Some(just.clone()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(20), Some(just.clone()), true) + .unwrap(); - let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); - let hashof15 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap().hash(); - let hashof20 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap().hash(); + let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); + let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); + let h3 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap(); // peer 1 should get the justifications from the network - net.peer(1).request_justification(&hashof10, 10); - net.peer(1).request_justification(&hashof15, 
15); - net.peer(1).request_justification(&hashof20, 20); + net.peer(1).request_justification(&h1.hash().into(), 10); + net.peer(1).request_justification(&h2.hash().into(), 15); + net.peer(1).request_justification(&h3.hash().into(), 20); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(hash).unwrap() != + for height in (10..21).step_by(5) { + if net.peer(0).client().justifications(&BlockId::Number(height)).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending } - if net.peer(1).client().justifications(hash).unwrap() != + if net.peer(1).client().justifications(&BlockId::Number(height)).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -305,7 +309,10 @@ fn sync_justifications_across_forks() { net.block_until_sync(); let just = (*b"FRNK", Vec::new()); - net.peer(0).client().finalize_block(f1_best, Some(just), true).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Hash(f1_best), Some(just), true) + .unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -313,9 +320,9 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justifications(f1_best).unwrap() == + if net.peer(0).client().justifications(&BlockId::Number(10)).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) && - net.peer(1).client().justifications(f1_best).unwrap() == + net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) @@ -360,10 +367,10 @@ fn syncs_all_forks() { net.block_until_sync(); // Check that all peers have all of the branches. 
- assert!(net.peer(0).has_block(b1)); - assert!(net.peer(0).has_block(b2)); - assert!(net.peer(1).has_block(b1)); - assert!(net.peer(1).has_block(b2)); + assert!(net.peer(0).has_block(&b1)); + assert!(net.peer(0).has_block(&b2)); + assert!(net.peer(1).has_block(&b1)); + assert!(net.peer(1).has_block(&b2)); } #[test] @@ -537,7 +544,7 @@ fn syncs_header_only_forks() { sp_tracing::try_init_simple(); let mut net = TestNet::new(0); net.add_full_peer_with_config(Default::default()); - net.add_full_peer_with_config(FullPeerConfig { blocks_pruning: Some(3), ..Default::default() }); + net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); @@ -546,13 +553,9 @@ fn syncs_header_only_forks() { net.peer(1).push_blocks(4, false); // Peer 1 will sync the small fork even though common block state is missing - while !net.peer(1).has_block(small_hash) { + while !net.peer(1).has_block(&small_hash) { net.block_until_idle(); } - - net.block_until_sync(); - assert_eq!(net.peer(0).client().info().best_hash, net.peer(1).client().info().best_hash); - assert_ne!(small_hash, net.peer(0).client().info().best_hash); } #[test] @@ -652,13 +655,19 @@ fn can_sync_to_peers_with_wrong_common_block() { // both peers re-org to the same fork without notifying each other let just = Some((*b"FRNK", Vec::new())); - net.peer(0).client().finalize_block(fork_hash, just.clone(), true).unwrap(); - net.peer(1).client().finalize_block(fork_hash, just, true).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Hash(fork_hash), just.clone(), true) + .unwrap(); + net.peer(1) + .client() + .finalize_block(BlockId::Hash(fork_hash), just, true) + .unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); - assert!(net.peer(1).has_block(final_hash)); + assert!(net.peer(1).has_block(&final_hash)); } /// Returns `is_new_best = true` for each validated announcement. 
@@ -719,7 +728,7 @@ fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { ForkChoiceStrategy::Custom(false), ); - while !net.peer(2).has_block(block_hash) { + while !net.peer(2).has_block(&block_hash) { net.block_until_idle(); } } @@ -762,7 +771,7 @@ fn wait_until_deferred_block_announce_validation_is_ready() { ForkChoiceStrategy::Custom(false), ); - while !net.peer(1).has_block(block_hash) { + while !net.peer(1).has_block(&block_hash) { net.block_until_idle(); } } @@ -783,7 +792,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { net.block_until_idle(); // The peer should not have synced the block. - assert!(!net.peer(1).has_block(block_hash)); + assert!(!net.peer(1).has_block(&block_hash)); // Make sync protocol aware of the best block net.peer(0).network_service().new_best_block_imported(block_hash, 3); @@ -797,7 +806,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(2).has_block(block_hash) { + if net.peer(2).has_block(&block_hash) { Poll::Ready(()) } else { Poll::Pending @@ -805,7 +814,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { })); // However peer 1 should still not have the block. 
- assert!(!net.peer(1).has_block(block_hash)); + assert!(!net.peer(1).has_block(&block_hash)); } /// Ensures that if we as a syncing node sync to the tip while we are connected to another peer @@ -826,10 +835,10 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() { net.block_until_connected(); net.block_until_idle(); - assert!(!net.peer(2).has_block(block_hash)); + assert!(!net.peer(2).has_block(&block_hash)); net.peer(0).network_service().new_best_block_imported(block_hash, 10_000); - while !net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) { + while !net.peer(2).has_block(&block_hash) && !net.peer(1).has_block(&block_hash) { net.block_until_idle(); } } @@ -890,7 +899,7 @@ fn block_announce_data_is_propagated() { let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); net.peer(0).announce_block(block_hash, Some(vec![137])); - while !net.peer(1).has_block(block_hash) || !net.peer(2).has_block(block_hash) { + while !net.peer(1).has_block(&block_hash) || !net.peer(2).has_block(&block_hash) { net.block_until_idle(); } } @@ -934,7 +943,7 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() { let block_hash = net.peer(0).push_blocks(500, true); net.block_until_sync(); - assert!(net.peer(1).has_block(block_hash)); + assert!(net.peer(1).has_block(&block_hash)); } /// When being spammed by the same request of a peer, we ban this peer. 
However, we should only ban @@ -948,14 +957,14 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { net.peer(0).push_blocks(10, false); net.block_until_sync(); - let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); - // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(hashof10).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); + + let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); // Let's assume block 10 was finalized, but we still need the justification from the network. - net.peer(1).request_justification(&hashof10, 10); + net.peer(1).request_justification(&h1.hash().into(), 10); // Let's build some more blocks and wait always for the network to have synced them for _ in 0..5 { @@ -967,23 +976,16 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { assert_eq!(1, net.peer(0).num_peers()); } - let hashof10 = net - .peer(0) - .client() - .as_backend() - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(10)) - .unwrap(); // Finalize the block and make the justification available. 
net.peer(0) .client() - .finalize_block(hashof10, Some((*b"FRNK", Vec::new())), true) + .finalize_block(BlockId::Number(10), Some((*b"FRNK", Vec::new())), true) .unwrap(); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justifications(hashof10).unwrap() != + if net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -1098,14 +1100,10 @@ fn syncs_state() { assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); let just = (*b"FRNK", Vec::new()); - let hashof60 = net - .peer(0) + net.peer(1) .client() - .as_backend() - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(60)) + .finalize_block(BlockId::Number(60), Some(just), true) .unwrap(); - net.peer(1).client().finalize_block(hashof60, Some(just), true).unwrap(); // Wait for state sync. block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -1160,14 +1158,14 @@ fn syncs_indexed_blocks() { .peer(0) .client() .as_client() - .indexed_transaction(indexed_key) + .indexed_transaction(&indexed_key) .unwrap() .is_some()); assert!(net .peer(1) .client() .as_client() - .indexed_transaction(indexed_key) + .indexed_transaction(&indexed_key) .unwrap() .is_none()); @@ -1176,7 +1174,7 @@ fn syncs_indexed_blocks() { .peer(1) .client() .as_client() - .indexed_transaction(indexed_key) + .indexed_transaction(&indexed_key) .unwrap() .is_some()); } @@ -1194,7 +1192,7 @@ fn warp_sync() { ..Default::default() }); let gap_end = net.peer(0).push_blocks(63, false); - let target = net.peer(0).push_blocks(1, false); + net.peer(0).push_blocks(1, false); net.peer(1).push_blocks(64, false); net.peer(2).push_blocks(64, false); // Wait for peer 1 to sync state. 
@@ -1205,7 +1203,7 @@ fn warp_sync() { // Wait for peer 1 download block history block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(3).has_body(gap_end) && net.peer(3).has_body(target) { + if net.peer(3).has_block(&gap_end) { Poll::Ready(()) } else { Poll::Pending diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml deleted file mode 100644 index d92c07cd461a8..0000000000000 --- a/client/network/transactions/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -description = "Substrate transaction protocol" -name = "sc-network-transactions" -version = "0.10.0-dev" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -documentation = "https://docs.rs/sc-network-transactions" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -array-bytes = "4.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -futures = "0.3.21" -hex = "0.4.0" -libp2p = "0.49.0" -log = "0.4.17" -pin-project = "1.0.12" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } -sc-network-common = { version = "0.10.0-dev", path = "../common" } -sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/transactions/src/config.rs b/client/network/transactions/src/config.rs deleted file mode 100644 index abb8cccd301ac..0000000000000 --- a/client/network/transactions/src/config.rs +++ /dev/null @@ -1,98 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Configuration of the transaction protocol - -use futures::prelude::*; -use sc_network_common::ExHashT; -use sp_runtime::traits::Block as BlockT; -use std::{collections::HashMap, future::Future, pin::Pin, time}; - -/// Interval at which we propagate transactions; -pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); - -/// Maximum number of known transaction hashes to keep for a peer. -/// -/// This should be approx. 2 blocks full of transactions for the network to function properly. -pub(crate) const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. - -/// Maximum allowed size for a transactions notification. -pub(crate) const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; - -/// Maximum number of transaction validation request we keep at any moment. -pub(crate) const MAX_PENDING_TRANSACTIONS: usize = 8192; - -/// Result of the transaction import. -#[derive(Clone, Copy, Debug)] -pub enum TransactionImport { - /// Transaction is good but already known by the transaction pool. - KnownGood, - /// Transaction is good and not yet known. - NewGood, - /// Transaction is invalid. - Bad, - /// Transaction import was not performed. - None, -} - -/// Future resolving to transaction import result. 
-pub type TransactionImportFuture = Pin + Send>>; - -/// Transaction pool interface -pub trait TransactionPool: Send + Sync { - /// Get transactions from the pool that are ready to be propagated. - fn transactions(&self) -> Vec<(H, B::Extrinsic)>; - /// Get hash of transaction. - fn hash_of(&self, transaction: &B::Extrinsic) -> H; - /// Import a transaction into the pool. - /// - /// This will return future. - fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; - /// Notify the pool about transactions broadcast. - fn on_broadcasted(&self, propagations: HashMap>); - /// Get transaction by hash. - fn transaction(&self, hash: &H) -> Option; -} - -/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always -/// empty and discards all incoming transactions. -/// -/// Requires the "hash" type to implement the `Default` trait. -/// -/// Useful for testing purposes. -pub struct EmptyTransactionPool; - -impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(H, B::Extrinsic)> { - Vec::new() - } - - fn hash_of(&self, _transaction: &B::Extrinsic) -> H { - Default::default() - } - - fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { - Box::pin(future::ready(TransactionImport::KnownGood)) - } - - fn on_broadcasted(&self, _: HashMap>) {} - - fn transaction(&self, _h: &H) -> Option { - None - } -} diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 1e8c802496453..8da2d4be3adde 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -13,24 +13,22 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" bytes = "1.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" +hex = "0.4" hyper = { version = "0.14.16", features = ["stream", "http2"] } hyper-rustls = { version = "0.23.0", features = ["http2"] } 
-libp2p = { version = "0.49.0", default-features = false } num_cpus = "1.13" once_cell = "1.8" -parking_lot = "0.12.1" +parking_lot = "0.12.0" rand = "0.7.2" threadpool = "1.7" tracing = "0.1.29" sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } -sc-peerset = { version = "4.0.0-dev", path = "../peerset" } +sc-network = { version = "0.10.0-dev", path = "../network" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 7d3dd8302f343..c80b511c84d17 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -22,7 +22,7 @@ use crate::NetworkProvider; use codec::{Decode, Encode}; use futures::Future; pub use http::SharedClient; -use libp2p::{Multiaddr, PeerId}; +use sc_network::{Multiaddr, PeerId}; use sp_core::{ offchain::{ self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr, @@ -79,8 +79,8 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?array_bytes::bytes2hex("", key), - value = ?array_bytes::bytes2hex("", value), + key = ?hex::encode(key), + value = ?hex::encode(value), "Write", ); match kind { @@ -93,7 +93,7 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?array_bytes::bytes2hex("", key), + key = ?hex::encode(key), "Clear", ); match kind { @@ -112,9 +112,9 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?array_bytes::bytes2hex("", key), - new_value = ?array_bytes::bytes2hex("", new_value), - old_value = ?old_value.as_ref().map(|s| array_bytes::bytes2hex("", s)), + key = ?hex::encode(key), + new_value = ?hex::encode(new_value), + old_value = 
?old_value.as_ref().map(hex::encode), "CAS", ); match kind { @@ -132,8 +132,8 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?array_bytes::bytes2hex("", key), - result = ?result.as_ref().map(|s| array_bytes::bytes2hex("", s)), + key = ?hex::encode(key), + result = ?result.as_ref().map(hex::encode), "Read", ); result @@ -324,90 +324,20 @@ impl AsyncApi { #[cfg(test)] mod tests { use super::*; - use libp2p::PeerId; use sc_client_db::offchain::LocalStorage; - use sc_network_common::{ - config::MultiaddrWithPeerId, - protocol::ProtocolName, - service::{NetworkPeers, NetworkStateInfo}, - }; - use sc_peerset::ReputationChange; + use sc_network::{NetworkStateInfo, PeerId}; use sp_core::offchain::{DbExternalities, Externalities}; use std::time::SystemTime; pub(super) struct TestNetwork(); - impl NetworkPeers for TestNetwork { + impl NetworkProvider for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!(); + unimplemented!() } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!(); - } - - fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { - unimplemented!(); - } - - fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { - unimplemented!(); - } - - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { - unimplemented!(); - } - - fn accept_unreserved_peers(&self) { - unimplemented!(); - } - - fn deny_unreserved_peers(&self) { - unimplemented!(); - } - - fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { - unimplemented!(); - } - - fn remove_reserved_peer(&self, _peer_id: PeerId) { - unimplemented!(); - } - - fn set_reserved_peers( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn add_peers_to_reserved_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn 
remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn add_to_peers_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn sync_num_connected(&self) -> usize { - unimplemented!(); + unimplemented!() } } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 87e79833b9706..d54d491b04c43 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -35,14 +35,14 @@ #![warn(missing_docs)] -use std::{fmt, marker::PhantomData, sync::Arc}; +use std::{collections::HashSet, fmt, marker::PhantomData, sync::Arc}; use futures::{ future::{ready, Future}, prelude::*, }; use parking_lot::Mutex; -use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; +use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; use sp_runtime::{ @@ -60,9 +60,27 @@ const LOG_TARGET: &str = "offchain-worker"; /// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the /// underlying Substrate networking. -pub trait NetworkProvider: NetworkStateInfo + NetworkPeers {} +pub trait NetworkProvider: NetworkStateInfo { + /// Set the authorized peers. + fn set_authorized_peers(&self, peers: HashSet); -impl NetworkProvider for T where T: NetworkStateInfo + NetworkPeers {} + /// Set the authorized only flag. 
+ fn set_authorized_only(&self, reserved_only: bool); +} + +impl NetworkProvider for NetworkService +where + B: traits::Block + 'static, + H: ExHashT, +{ + fn set_authorized_peers(&self, peers: HashSet) { + NetworkService::set_authorized_peers(self, peers) + } + + fn set_authorized_only(&self, reserved_only: bool) { + NetworkService::set_authorized_only(self, reserved_only) + } +} /// Options for [`OffchainWorkers`] pub struct OffchainWorkerOptions { @@ -246,15 +264,13 @@ pub async fn notification_future( mod tests { use super::*; use futures::executor::block_on; - use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName}; - use sc_peerset::ReputationChange; + use sc_network::{Multiaddr, PeerId}; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use std::{collections::HashSet, sync::Arc}; + use std::sync::Arc; use substrate_test_runtime_client::{ runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, @@ -272,77 +288,13 @@ mod tests { } } - impl NetworkPeers for TestNetwork { + impl NetworkProvider for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!(); + unimplemented!() } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!(); - } - - fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { - unimplemented!(); - } - - fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { - unimplemented!(); - } - - fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { - unimplemented!(); - } - - fn accept_unreserved_peers(&self) { - unimplemented!(); - } - - fn deny_unreserved_peers(&self) { - unimplemented!(); - } - - fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { - 
unimplemented!(); - } - - fn remove_reserved_peer(&self, _peer_id: PeerId) { - unimplemented!(); - } - - fn set_reserved_peers( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn add_peers_to_reserved_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn add_to_peers_set( - &self, - _protocol: ProtocolName, - _peers: HashSet, - ) -> Result<(), String> { - unimplemented!(); - } - - fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { - unimplemented!(); - } - - fn sync_num_connected(&self) -> usize { - unimplemented!(); + unimplemented!() } } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index ade2bc3d78d03..510ebf7006155 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -libp2p = { version = "0.49.0", default-features = false } +libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -serde_json = "1.0.85" +serde_json = "1.0.79" wasm-timer = "0.2" sc-utils = { version = "4.0.0-dev", path = "../utils" } diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 7c4057154bdb0..101d558663f9f 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" -parking_lot = "0.12.1" +parking_lot = "0.12.0" scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.79" thiserror = "1.0" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-transaction-pool-api = { 
version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index 43fd3325fa598..fe74dea256376 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -32,9 +32,6 @@ pub enum Error { /// The re-execution of the specified block failed. #[error("Failed to re-execute the specified block")] BlockExecutionFailed, - /// Failed to extract the proof. - #[error("Failed to extract the proof")] - ProofExtractionFailed, /// The witness compaction failed. #[error("Failed to create to compact the witness")] WitnessCompactionFailed, @@ -57,8 +54,6 @@ impl From for JsonRpseeError { CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)), Error::WitnessCompactionFailed => CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)), - Error::ProofExtractionFailed => - CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)), Error::UnsafeRpcCalled(e) => e.into(), } .into() diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 40e208c2eba8d..54bf21674a8bd 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -265,7 +265,7 @@ pub trait StateApi { /// [substrate storage][1], [transparent keys in substrate][2], /// [querying substrate storage via rpc][3]. 
/// - /// [1]: https://docs.substrate.io/main-docs/fundamentals/state-transitions-and-storage/ + /// [1]: https://docs.substrate.io/v3/advanced/storage#storage-map-keys /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ /// diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index 7ddb3f813c249..4561fccc1e81b 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -88,10 +88,10 @@ pub struct SyncState { pub starting_block: Number, /// Height of the current best block of the node. pub current_block: Number, - /// Height of the highest block in the network. - pub highest_block: Number, + /// Height of the highest block learned from the network. Missing if no block is known yet. + #[serde(default = "Default::default", skip_serializing_if = "Option::is_none")] + pub highest_block: Option, } - #[cfg(test)] mod tests { use super::*; @@ -129,7 +129,7 @@ mod tests { ::serde_json::to_string(&SyncState { starting_block: 12u32, current_block: 50u32, - highest_block: 128u32, + highest_block: Some(128u32), }) .unwrap(), r#"{"startingBlock":12,"currentBlock":50,"highestBlock":128}"#, @@ -139,10 +139,10 @@ mod tests { ::serde_json::to_string(&SyncState { starting_block: 12u32, current_block: 50u32, - highest_block: 50u32, + highest_block: None, }) .unwrap(), - r#"{"startingBlock":12,"currentBlock":50,"highestBlock":50}"#, + r#"{"startingBlock":12,"currentBlock":50}"#, ); } } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index ef2e6bec4cdb0..8b40972527be8 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -16,6 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] futures = "0.3.21" jsonrpsee = { version = "0.15.1", features = ["server"] } log = "0.4.17" -serde_json = "1.0.85" +serde_json = "1.0.79" tokio = { version = "1.17.0", features = 
["parking_lot"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 7eb825e169bfa..68b4aa6767348 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -198,11 +198,12 @@ fn format_allowed_hosts(addrs: &[SocketAddr]) -> Vec { fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModule { let mut available_methods = rpc_api.method_names().collect::>(); - available_methods.sort(); + available_methods.sort_unstable(); rpc_api .register_method("rpc_methods", move |_, _| { Ok(serde_json::json!({ + "version": 1, "methods": available_methods, })) }) diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 0d77442323241..1c0660fc3528d 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -25,21 +25,6 @@ use prometheus_endpoint::{ }; use std::net::SocketAddr; -/// Histogram time buckets in microseconds. -const HISTOGRAM_BUCKETS: [f64; 11] = [ - 5.0, - 25.0, - 100.0, - 500.0, - 1_000.0, - 2_500.0, - 10_000.0, - 25_000.0, - 100_000.0, - 1_000_000.0, - 10_000_000.0, -]; - /// Metrics for RPC middleware storing information about the number of requests started/completed, /// calls started/completed and their timings. 
#[derive(Debug, Clone)] @@ -90,8 +75,7 @@ impl RpcMetrics { HistogramOpts::new( "substrate_rpc_calls_time", "Total time [μs] of processed RPC calls", - ) - .buckets(HISTOGRAM_BUCKETS.to_vec()), + ), &["protocol", "method"], )?, metrics_registry, diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml deleted file mode 100644 index 885d415eb50d2..0000000000000 --- a/client/rpc-spec-v2/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "sc-rpc-spec-v2" -version = "0.10.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Substrate RPC interface v2." -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } -# Internal chain structures for "chain_spec". -sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } -# Pool for submitting extrinsics required by "transaction" -sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } -sp-core = { version = "6.0.0", path = "../../primitives/core" } -sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } -sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -codec = { package = "parity-scale-codec", version = "3.0.0" } -thiserror = "1.0" -serde = "1.0" -hex = "0.4" -futures = "0.3.21" - -[dev-dependencies] -serde_json = "1.0" -tokio = { version = "1.17.0", features = ["macros"] } diff --git a/client/rpc-spec-v2/README.md b/client/rpc-spec-v2/README.md deleted file mode 100644 index e860e0c2334da..0000000000000 --- a/client/rpc-spec-v2/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Substrate RPC interfaces. 
- -A collection of RPC methods and subscriptions supported by all substrate clients. - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-spec-v2/src/chain_spec/api.rs b/client/rpc-spec-v2/src/chain_spec/api.rs deleted file mode 100644 index dfe2d76de6501..0000000000000 --- a/client/rpc-spec-v2/src/chain_spec/api.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! API trait of the chain spec. - -use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use sc_chain_spec::Properties; - -#[rpc(client, server)] -pub trait ChainSpecApi { - /// Get the chain name, as present in the chain specification. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainSpec_unstable_chainName")] - fn chain_spec_unstable_chain_name(&self) -> RpcResult; - - /// Get the chain's genesis hash. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainSpec_unstable_genesisHash")] - fn chain_spec_unstable_genesis_hash(&self) -> RpcResult; - - /// Get the properties of the chain, as present in the chain specification. 
- /// - /// # Note - /// - /// The json whitespaces are not guaranteed to persist. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainSpec_unstable_properties")] - fn chain_spec_unstable_properties(&self) -> RpcResult; -} diff --git a/client/rpc-spec-v2/src/chain_spec/chain_spec.rs b/client/rpc-spec-v2/src/chain_spec/chain_spec.rs deleted file mode 100644 index 90d05f1d9d41d..0000000000000 --- a/client/rpc-spec-v2/src/chain_spec/chain_spec.rs +++ /dev/null @@ -1,60 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! API implementation for the specification of a chain. - -use crate::chain_spec::api::ChainSpecApiServer; -use jsonrpsee::core::RpcResult; -use sc_chain_spec::Properties; - -/// An API for chain spec RPC calls. -pub struct ChainSpec { - /// The name of the chain. - name: String, - /// The hexadecimal encoded hash of the genesis block. - genesis_hash: String, - /// Chain properties. - properties: Properties, -} - -impl ChainSpec { - /// Creates a new [`ChainSpec`]. 
- pub fn new>( - name: String, - genesis_hash: Hash, - properties: Properties, - ) -> Self { - let genesis_hash = format!("0x{}", hex::encode(genesis_hash)); - - Self { name, properties, genesis_hash } - } -} - -impl ChainSpecApiServer for ChainSpec { - fn chain_spec_unstable_chain_name(&self) -> RpcResult { - Ok(self.name.clone()) - } - - fn chain_spec_unstable_genesis_hash(&self) -> RpcResult { - Ok(self.genesis_hash.clone()) - } - - fn chain_spec_unstable_properties(&self) -> RpcResult { - Ok(self.properties.clone()) - } -} diff --git a/client/rpc-spec-v2/src/chain_spec/mod.rs b/client/rpc-spec-v2/src/chain_spec/mod.rs deleted file mode 100644 index cd4fcf246f603..0000000000000 --- a/client/rpc-spec-v2/src/chain_spec/mod.rs +++ /dev/null @@ -1,38 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Substrate chain specification API. -//! -//! The *chain spec* (short for *chain specification*) allows inspecting the content of -//! the specification of the chain that a JSON-RPC server is targeting. -//! -//! The values returned by the API are guaranteed to never change during the lifetime of the -//! JSON-RPC server. -//! -//! # Note -//! -//! Methods are prefixed by `chainSpec`. 
- -#[cfg(test)] -mod tests; - -pub mod api; -pub mod chain_spec; - -pub use api::ChainSpecApiServer; -pub use chain_spec::ChainSpec; diff --git a/client/rpc-spec-v2/src/chain_spec/tests.rs b/client/rpc-spec-v2/src/chain_spec/tests.rs deleted file mode 100644 index 6c078b2974e98..0000000000000 --- a/client/rpc-spec-v2/src/chain_spec/tests.rs +++ /dev/null @@ -1,61 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use super::*; -use jsonrpsee::{types::EmptyParams, RpcModule}; -use sc_chain_spec::Properties; - -const CHAIN_NAME: &'static str = "TEST_CHAIN_NAME"; -const CHAIN_GENESIS: [u8; 32] = [0; 32]; -const CHAIN_PROPERTIES: &'static str = r#"{"three": "123", "one": 1, "two": 12}"#; - -fn api() -> RpcModule { - ChainSpec::new( - CHAIN_NAME.to_string(), - CHAIN_GENESIS, - serde_json::from_str(CHAIN_PROPERTIES).unwrap(), - ) - .into_rpc() -} - -#[tokio::test] -async fn chain_spec_chain_name_works() { - let name = api() - .call::<_, String>("chainSpec_unstable_chainName", EmptyParams::new()) - .await - .unwrap(); - assert_eq!(name, CHAIN_NAME); -} - -#[tokio::test] -async fn chain_spec_genesis_hash_works() { - let genesis = api() - .call::<_, String>("chainSpec_unstable_genesisHash", EmptyParams::new()) - .await - .unwrap(); - assert_eq!(genesis, format!("0x{}", hex::encode(CHAIN_GENESIS))); -} - -#[tokio::test] -async fn chain_spec_properties_works() { - let properties = api() - .call::<_, Properties>("chainSpec_unstable_properties", EmptyParams::new()) - .await - .unwrap(); - assert_eq!(properties, serde_json::from_str(CHAIN_PROPERTIES).unwrap()); -} diff --git a/client/rpc-spec-v2/src/lib.rs b/client/rpc-spec-v2/src/lib.rs deleted file mode 100644 index f4b9d2f95bf97..0000000000000 --- a/client/rpc-spec-v2/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Substrate JSON-RPC interface v2. -//! -//! Specification [document](https://paritytech.github.io/json-rpc-interface-spec/). - -#![warn(missing_docs)] -#![deny(unused_crate_dependencies)] - -pub mod chain_spec; -pub mod transaction; - -/// Task executor that is being used by RPC subscriptions. -pub type SubscriptionTaskExecutor = std::sync::Arc; diff --git a/client/rpc-spec-v2/src/transaction/api.rs b/client/rpc-spec-v2/src/transaction/api.rs deleted file mode 100644 index 2f0c799f1cc19..0000000000000 --- a/client/rpc-spec-v2/src/transaction/api.rs +++ /dev/null @@ -1,37 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! API trait for transactions. - -use crate::transaction::event::TransactionEvent; -use jsonrpsee::proc_macros::rpc; -use sp_core::Bytes; - -#[rpc(client, server)] -pub trait TransactionApi { - /// Submit an extrinsic to watch. - /// - /// See [`TransactionEvent`](crate::transaction::event::TransactionEvent) for details on - /// transaction life cycle. 
- #[subscription( - name = "transaction_unstable_submitAndWatch" => "transaction_unstable_submitExtrinsic", - unsubscribe = "transaction_unstable_unwatch", - item = TransactionEvent, - )] - fn submit_and_watch(&self, bytes: Bytes); -} diff --git a/client/rpc-spec-v2/src/transaction/error.rs b/client/rpc-spec-v2/src/transaction/error.rs deleted file mode 100644 index 72a5959992f9e..0000000000000 --- a/client/rpc-spec-v2/src/transaction/error.rs +++ /dev/null @@ -1,100 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Transaction RPC errors. -//! -//! Errors are interpreted as transaction events for subscriptions. - -use crate::transaction::event::{TransactionError, TransactionEvent}; -use sc_transaction_pool_api::error::Error as PoolError; -use sp_runtime::transaction_validity::InvalidTransaction; - -/// Transaction RPC errors. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// Transaction pool error. - #[error("Transaction pool error: {}", .0)] - Pool(#[from] PoolError), - /// Verification error. 
- #[error("Extrinsic verification error: {}", .0)] - Verification(Box), -} - -impl From for TransactionEvent { - fn from(e: Error) -> Self { - match e { - Error::Verification(e) => TransactionEvent::Invalid(TransactionError { - error: format!("Verification error: {}", e), - }), - Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => - TransactionEvent::Invalid(TransactionError { - error: format!("Invalid transaction with custom error: {}", e), - }), - Error::Pool(PoolError::InvalidTransaction(e)) => { - let msg: &str = e.into(); - TransactionEvent::Invalid(TransactionError { - error: format!("Invalid transaction: {}", msg), - }) - }, - Error::Pool(PoolError::UnknownTransaction(e)) => { - let msg: &str = e.into(); - TransactionEvent::Invalid(TransactionError { - error: format!("Unknown transaction validity: {}", msg), - }) - }, - Error::Pool(PoolError::TemporarilyBanned) => - TransactionEvent::Invalid(TransactionError { - error: "Transaction is temporarily banned".into(), - }), - Error::Pool(PoolError::AlreadyImported(_)) => - TransactionEvent::Invalid(TransactionError { - error: "Transaction is already imported".into(), - }), - Error::Pool(PoolError::TooLowPriority { old, new }) => - TransactionEvent::Invalid(TransactionError { - error: format!( - "The priority of the transactin is too low (pool {} > current {})", - old, new - ), - }), - Error::Pool(PoolError::CycleDetected) => TransactionEvent::Invalid(TransactionError { - error: "The transaction contains a cyclic dependency".into(), - }), - Error::Pool(PoolError::ImmediatelyDropped) => - TransactionEvent::Invalid(TransactionError { - error: "The transaction could not enter the pool because of the limit".into(), - }), - Error::Pool(PoolError::Unactionable) => TransactionEvent::Invalid(TransactionError { - error: "Transaction cannot be propagated and the local node does not author blocks" - .into(), - }), - Error::Pool(PoolError::NoTagsProvided) => TransactionEvent::Invalid(TransactionError { 
- error: "Transaction does not provide any tags, so the pool cannot identify it" - .into(), - }), - Error::Pool(PoolError::InvalidBlockId(_)) => - TransactionEvent::Invalid(TransactionError { - error: "The provided block ID is not valid".into(), - }), - Error::Pool(PoolError::RejectedFutureTransaction) => - TransactionEvent::Invalid(TransactionError { - error: "The pool is not accepting future transactions".into(), - }), - } - } -} diff --git a/client/rpc-spec-v2/src/transaction/event.rs b/client/rpc-spec-v2/src/transaction/event.rs deleted file mode 100644 index 3c75eaff10fd4..0000000000000 --- a/client/rpc-spec-v2/src/transaction/event.rs +++ /dev/null @@ -1,353 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! The transaction's event returned as json compatible object. - -use serde::{Deserialize, Serialize}; - -/// The transaction was broadcasted to a number of peers. -/// -/// # Note -/// -/// The RPC does not guarantee that the peers have received the -/// transaction. -/// -/// When the number of peers is zero, the event guarantees that -/// shutting down the local node will lead to the transaction -/// not being included in the chain. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionBroadcasted { - /// The number of peers the transaction was broadcasted to. - #[serde(with = "as_string")] - pub num_peers: usize, -} - -/// The transaction was included in a block of the chain. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionBlock { - /// The hash of the block the transaction was included into. - pub hash: Hash, - /// The index (zero-based) of the transaction within the body of the block. - #[serde(with = "as_string")] - pub index: usize, -} - -/// The transaction could not be processed due to an error. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionError { - /// Reason of the error. - pub error: String, -} - -/// The transaction was dropped because of exceeding limits. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionDropped { - /// True if the transaction was broadcasted to other peers and - /// may still be included in the block. - pub broadcasted: bool, - /// Reason of the event. - pub error: String, -} - -/// Possible transaction status events. -/// -/// The status events can be grouped based on their kinds as: -/// -/// 1. Runtime validated the transaction: -/// - `Validated` -/// -/// 2. Inside the `Ready` queue: -/// - `Broadcast` -/// -/// 3. Leaving the pool: -/// - `BestChainBlockIncluded` -/// - `Invalid` -/// -/// 4. Block finalized: -/// - `Finalized` -/// -/// 5. At any time: -/// - `Dropped` -/// - `Error` -/// -/// The subscription's stream is considered finished whenever the following events are -/// received: `Finalized`, `Error`, `Invalid` or `Dropped`. However, the user is allowed -/// to unsubscribe at any moment. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -// We need to manually specify the trait bounds for the `Hash` trait to ensure `into` and -// `from` still work. -#[serde(bound( - serialize = "Hash: Serialize + Clone", - deserialize = "Hash: Deserialize<'de> + Clone" -))] -#[serde(into = "TransactionEventIR", from = "TransactionEventIR")] -pub enum TransactionEvent { - /// The transaction was validated by the runtime. - Validated, - /// The transaction was broadcasted to a number of peers. - Broadcasted(TransactionBroadcasted), - /// The transaction was included in a best block of the chain. - /// - /// # Note - /// - /// This may contain `None` if the block is no longer a best - /// block of the chain. - BestChainBlockIncluded(Option>), - /// The transaction was included in a finalized block. - Finalized(TransactionBlock), - /// The transaction could not be processed due to an error. - Error(TransactionError), - /// The transaction is marked as invalid. - Invalid(TransactionError), - /// The client was not capable of keeping track of this transaction. - Dropped(TransactionDropped), -} - -/// Intermediate representation (IR) for the transaction events -/// that handles block events only. -/// -/// The block events require a JSON compatible interpretation similar to: -/// -/// ```json -/// { event: "EVENT", block: { hash: "0xFF", index: 0 } } -/// ``` -/// -/// This IR is introduced to circumvent that the block events need to -/// be serialized/deserialized with "tag" and "content", while other -/// events only require "tag". -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event", content = "block")] -enum TransactionEventBlockIR { - /// The transaction was included in the best block of the chain. - BestChainBlockIncluded(Option>), - /// The transaction was included in a finalized block of the chain. 
- Finalized(TransactionBlock), -} - -/// Intermediate representation (IR) for the transaction events -/// that handles non-block events only. -/// -/// The non-block events require a JSON compatible interpretation similar to: -/// -/// ```json -/// { event: "EVENT", num_peers: 0 } -/// ``` -/// -/// This IR is introduced to circumvent that the block events need to -/// be serialized/deserialized with "tag" and "content", while other -/// events only require "tag". -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event")] -enum TransactionEventNonBlockIR { - Validated, - Broadcasted(TransactionBroadcasted), - Error(TransactionError), - Invalid(TransactionError), - Dropped(TransactionDropped), -} - -/// Intermediate representation (IR) used for serialization/deserialization of the -/// [`TransactionEvent`] in a JSON compatible format. -/// -/// Serde cannot mix `#[serde(tag = "event")]` with `#[serde(tag = "event", content = "block")]` -/// for specific enum variants. Therefore, this IR is introduced to circumvent this -/// restriction, while exposing a simplified [`TransactionEvent`] for users of the -/// rust ecosystem. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound(serialize = "Hash: Serialize", deserialize = "Hash: Deserialize<'de>"))] -#[serde(rename_all = "camelCase")] -#[serde(untagged)] -enum TransactionEventIR { - Block(TransactionEventBlockIR), - NonBlock(TransactionEventNonBlockIR), -} - -impl From> for TransactionEventIR { - fn from(value: TransactionEvent) -> Self { - match value { - TransactionEvent::Validated => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated), - TransactionEvent::Broadcasted(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)), - TransactionEvent::BestChainBlockIncluded(event) => - TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)), - TransactionEvent::Finalized(event) => - TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)), - TransactionEvent::Error(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)), - TransactionEvent::Invalid(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)), - TransactionEvent::Dropped(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)), - } - } -} - -impl From> for TransactionEvent { - fn from(value: TransactionEventIR) -> Self { - match value { - TransactionEventIR::NonBlock(status) => match status { - TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, - TransactionEventNonBlockIR::Broadcasted(event) => - TransactionEvent::Broadcasted(event), - TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), - TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), - TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), - }, - TransactionEventIR::Block(block) => match block { - TransactionEventBlockIR::Finalized(event) => TransactionEvent::Finalized(event), - TransactionEventBlockIR::BestChainBlockIncluded(event) => - 
TransactionEvent::BestChainBlockIncluded(event), - }, - } - } -} - -/// Serialize and deserialize helper as string. -mod as_string { - use super::*; - use serde::{Deserializer, Serializer}; - - pub fn serialize(data: &usize, serializer: S) -> Result { - data.to_string().serialize(serializer) - } - - pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result { - String::deserialize(deserializer)? - .parse() - .map_err(|e| serde::de::Error::custom(format!("Parsing failed: {}", e))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::H256; - - #[test] - fn validated_event() { - let event: TransactionEvent<()> = TransactionEvent::Validated; - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"validated"}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - - #[test] - fn broadcasted_event() { - let event: TransactionEvent<()> = - TransactionEvent::Broadcasted(TransactionBroadcasted { num_peers: 2 }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"broadcasted","numPeers":"2"}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - - #[test] - fn best_chain_event() { - let event: TransactionEvent<()> = TransactionEvent::BestChainBlockIncluded(None); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"bestChainBlockIncluded","block":null}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - - let event: TransactionEvent = - TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { - hash: H256::from_low_u64_be(1), - index: 2, - })); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = 
r#"{"event":"bestChainBlockIncluded","block":{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","index":"2"}}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - - #[test] - fn finalized_event() { - let event: TransactionEvent = TransactionEvent::Finalized(TransactionBlock { - hash: H256::from_low_u64_be(1), - index: 10, - }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"finalized","block":{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","index":"10"}}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - - #[test] - fn error_event() { - let event: TransactionEvent<()> = - TransactionEvent::Error(TransactionError { error: "abc".to_string() }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"error","error":"abc"}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - - #[test] - fn invalid_event() { - let event: TransactionEvent<()> = - TransactionEvent::Invalid(TransactionError { error: "abc".to_string() }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"invalid","error":"abc"}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - - #[test] - fn dropped_event() { - let event: TransactionEvent<()> = TransactionEvent::Dropped(TransactionDropped { - broadcasted: true, - error: "abc".to_string(), - }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"dropped","broadcasted":true,"error":"abc"}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } -} diff --git 
a/client/rpc-spec-v2/src/transaction/mod.rs b/client/rpc-spec-v2/src/transaction/mod.rs deleted file mode 100644 index bb983894a428c..0000000000000 --- a/client/rpc-spec-v2/src/transaction/mod.rs +++ /dev/null @@ -1,38 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Substrate transaction API. -//! -//! The transaction methods allow submitting a transaction and subscribing to -//! its status updates generated by the chain. -//! -//! # Note -//! -//! Methods are prefixed by `transaction`. - -pub mod api; -pub mod error; -pub mod event; -pub mod transaction; - -pub use api::TransactionApiServer; -pub use event::{ - TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, - TransactionEvent, -}; -pub use transaction::Transaction; diff --git a/client/rpc-spec-v2/src/transaction/transaction.rs b/client/rpc-spec-v2/src/transaction/transaction.rs deleted file mode 100644 index e2cf736dff17a..0000000000000 --- a/client/rpc-spec-v2/src/transaction/transaction.rs +++ /dev/null @@ -1,208 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! API implementation for submitting transactions. - -use crate::{ - transaction::{ - api::TransactionApiServer, - error::Error, - event::{ - TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, - TransactionEvent, - }, - }, - SubscriptionTaskExecutor, -}; -use jsonrpsee::{ - core::async_trait, - types::{ - error::{CallError, ErrorObject}, - SubscriptionResult, - }, - SubscriptionSink, -}; -use sc_transaction_pool_api::{ - error::IntoPoolError, BlockHash, TransactionFor, TransactionPool, TransactionSource, - TransactionStatus, -}; -use std::sync::Arc; - -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_core::Bytes; -use sp_runtime::{generic, traits::Block as BlockT}; - -use codec::Decode; -use futures::{FutureExt, StreamExt, TryFutureExt}; - -/// An API for transaction RPC calls. -pub struct Transaction { - /// Substrate client. - client: Arc, - /// Transactions pool. - pool: Arc, - /// Executor to spawn subscriptions. - executor: SubscriptionTaskExecutor, -} - -impl Transaction { - /// Creates a new [`Transaction`]. - pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { - Transaction { client, pool, executor } - } -} - -/// Currently we treat all RPC transactions as externals. 
-/// -/// Possibly in the future we could allow opt-in for special treatment -/// of such transactions, so that the block authors can inject -/// some unique transactions via RPC and have them included in the pool. -const TX_SOURCE: TransactionSource = TransactionSource::External; - -/// Extrinsic has an invalid format. -/// -/// # Note -/// -/// This is similar to the old `author` API error code. -const BAD_FORMAT: i32 = 1001; - -#[async_trait] -impl TransactionApiServer> for Transaction -where - Pool: TransactionPool + Sync + Send + 'static, - Pool::Hash: Unpin, - ::Hash: Unpin, - Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, -{ - fn submit_and_watch(&self, mut sink: SubscriptionSink, xt: Bytes) -> SubscriptionResult { - // This is the only place where the RPC server can return an error for this - // subscription. Other defects must be signaled as events to the sink. - let decoded_extrinsic = match TransactionFor::::decode(&mut &xt[..]) { - Ok(decoded_extrinsic) => decoded_extrinsic, - Err(e) => { - let err = CallError::Custom(ErrorObject::owned( - BAD_FORMAT, - format!("Extrinsic has invalid format: {}", e), - None::<()>, - )); - let _ = sink.reject(err); - return Ok(()) - }, - }; - - let best_block_hash = self.client.info().best_hash; - - let submit = self - .pool - .submit_and_watch( - &generic::BlockId::hash(best_block_hash), - TX_SOURCE, - decoded_extrinsic, - ) - .map_err(|e| { - e.into_pool_error() - .map(Error::from) - .unwrap_or_else(|e| Error::Verification(Box::new(e))) - }); - - let fut = async move { - match submit.await { - Ok(stream) => { - let mut state = TransactionState::new(); - let stream = - stream.filter_map(|event| async move { state.handle_event(event) }); - sink.pipe_from_stream(stream.boxed()).await; - }, - Err(err) => { - // We have not created an `Watcher` for the tx. Make sure the - // error is still propagated as an event. 
- let event: TransactionEvent<::Hash> = err.into(); - sink.pipe_from_stream(futures::stream::once(async { event }).boxed()).await; - }, - }; - }; - - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - Ok(()) - } -} - -/// The transaction's state that needs to be preserved between -/// multiple events generated by the transaction-pool. -/// -/// # Note -/// -/// In the future, the RPC server can submit only the last event when multiple -/// identical events happen in a row. -#[derive(Clone, Copy)] -struct TransactionState { - /// True if the transaction was previously broadcasted. - broadcasted: bool, -} - -impl TransactionState { - /// Construct a new [`TransactionState`]. - pub fn new() -> Self { - TransactionState { broadcasted: false } - } - - /// Handle events generated by the transaction-pool and convert them - /// to the new API expected state. - #[inline] - pub fn handle_event( - &mut self, - event: TransactionStatus, - ) -> Option> { - match event { - TransactionStatus::Ready | TransactionStatus::Future => - Some(TransactionEvent::::Validated), - TransactionStatus::Broadcast(peers) => { - // Set the broadcasted flag once if we submitted the transaction to - // at least one peer. 
- self.broadcasted = self.broadcasted || !peers.is_empty(); - - Some(TransactionEvent::Broadcasted(TransactionBroadcasted { - num_peers: peers.len(), - })) - }, - TransactionStatus::InBlock((hash, index)) => - Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { - hash, - index, - }))), - TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), - TransactionStatus::FinalityTimeout(_) => - Some(TransactionEvent::Dropped(TransactionDropped { - broadcasted: self.broadcasted, - error: "Maximum number of finality watchers has been reached".into(), - })), - TransactionStatus::Finalized((hash, index)) => - Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), - TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { - error: "Extrinsic was rendered invalid by another extrinsic".into(), - })), - TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError { - error: "Extrinsic dropped from the pool due to exceeding limits".into(), - })), - TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError { - error: "Extrinsic marked as invalid".into(), - })), - } - } -} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 4131fecaf510e..5a05ae6e29df1 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -19,8 +19,8 @@ hash-db = { version = "0.15.2", default-features = false } jsonrpsee = { version = "0.15.1", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } log = "0.4.17" -parking_lot = "0.12.1" -serde_json = "1.0.85" +parking_lot = "0.12.0" +serde_json = "1.0.79" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } @@ -46,7 +46,6 @@ assert_matches = "1.3.0" lazy_static = "1.4.0" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-network 
= { version = "0.10.0-dev", path = "../network" } -sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } tokio = "1.17.0" diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index 1e6dbd5aca148..f09da200ff587 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -40,7 +40,7 @@ async fn should_return_header() { Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: res.state_root, + state_root: res.state_root.clone(), extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -54,7 +54,7 @@ async fn should_return_header() { Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: res.state_root, + state_root: res.state_root.clone(), extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -93,7 +93,7 @@ async fn should_return_a_block() { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: res.block.header.state_root, + state_root: res.block.header.state_root.clone(), extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -110,7 +110,7 @@ async fn should_return_a_block() { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: res.block.header.state_root, + state_root: res.block.header.state_root.clone(), extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -198,7 +198,6 @@ async fn should_return_finalized_hash() { // import new block let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); // no finalization yet @@ -206,9 +205,9 @@ async fn 
should_return_finalized_hash() { assert_eq!(res, client.genesis_hash()); // finalize - client.finalize_block(block_hash, None).unwrap(); + client.finalize_block(BlockId::number(1), None).unwrap(); let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); - assert_eq!(res, block_hash); + assert_eq!(res, client.block_hash(1).unwrap().unwrap()); } #[tokio::test] @@ -233,9 +232,8 @@ async fn test_head_subscription(method: &str) { let api = new_full(client.clone(), test_executor()).into_rpc(); let sub = api.subscribe(method, EmptyParams::new()).await.unwrap(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); - client.finalize_block(block_hash, None).unwrap(); + client.finalize_block(BlockId::number(1), None).unwrap(); sub }; diff --git a/client/rpc/src/dev/tests.rs b/client/rpc/src/dev/tests.rs index 816f3cdfe6025..f3b18690d0972 100644 --- a/client/rpc/src/dev/tests.rs +++ b/client/rpc/src/dev/tests.rs @@ -43,8 +43,8 @@ async fn block_stats_work() { .await .unwrap(), Some(BlockStats { - witness_len: 630, - witness_compact_len: 534, + witness_len: 597, + witness_compact_len: 500, block_len: 99, num_extrinsics: 0, }), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 64b6cacaad700..42ba70b0af7e7 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -145,9 +145,10 @@ where ) -> Result<()> { for block_hash in &range.hashes { let mut block_changes = StorageChangeSet { block: *block_hash, changes: Vec::new() }; + let id = BlockId::hash(*block_hash); for key in keys { let (has_changed, data) = { - let curr_data = self.client.storage(*block_hash, key).map_err(client_err)?; + let curr_data = self.client.storage(&id, key).map_err(client_err)?; match last_values.get(key) { Some(prev_data) => (curr_data != *prev_data, curr_data), None => (true, curr_data), 
@@ -213,7 +214,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_keys(block, &prefix)) + .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) .map_err(client_err) } @@ -223,7 +224,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_pairs(block, &prefix)) + .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) .map_err(client_err) } @@ -236,7 +237,11 @@ where ) -> std::result::Result, Error> { self.block_or_best(block) .and_then(|block| { - self.client.storage_keys_iter(block, prefix.as_ref(), start_key.as_ref()) + self.client.storage_keys_iter( + &BlockId::Hash(block), + prefix.as_ref(), + start_key.as_ref(), + ) }) .map(|iter| iter.take(count as usize).collect()) .map_err(client_err) @@ -248,7 +253,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage(block, &key)) + .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) .map_err(client_err) } @@ -262,14 +267,14 @@ where Err(e) => return Err(client_err(e)), }; - match self.client.storage(block, &key) { + match self.client.storage(&BlockId::Hash(block), &key) { Ok(Some(d)) => return Ok(Some(d.0.len() as u64)), Err(e) => return Err(client_err(e)), Ok(None) => {}, } self.client - .storage_pairs(block, &key) + .storage_pairs(&BlockId::Hash(block), &key) .map(|kv| { let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); if item_sum > 0 { @@ -287,7 +292,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_hash(block, &key)) + .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) .map_err(client_err) } @@ -345,8 +350,8 @@ where self.block_or_best(block) .and_then(|block| { self.client - .read_proof(block, &mut 
keys.iter().map(|key| key.0.as_ref())) - .map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect()) + .read_proof(&BlockId::Hash(block), &mut keys.iter().map(|key| key.0.as_ref())) + .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) .map_err(client_err) @@ -413,7 +418,7 @@ where let changes = keys .into_iter() .map(|key| { - let v = self.client.storage(block, &key).ok().flatten(); + let v = self.client.storage(&BlockId::Hash(block), &key).ok().flatten(); (key, v) }) .collect(); @@ -494,11 +499,11 @@ where }; self.client .read_child_proof( - block, + &BlockId::Hash(block), &child_info, &mut keys.iter().map(|key| key.0.as_ref()), ) - .map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect()) + .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) .map_err(client_err) @@ -517,7 +522,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_keys(block, &child_info, &prefix) + self.client.child_storage_keys(&BlockId::Hash(block), &child_info, &prefix) }) .map_err(client_err) } @@ -538,7 +543,7 @@ where None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( - block, + &BlockId::Hash(block), child_info, prefix.as_ref(), start_key.as_ref(), @@ -561,7 +566,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage(block, &child_info, &key) + self.client.child_storage(&BlockId::Hash(block), &child_info, &key) }) .map_err(client_err) } @@ -584,7 +589,10 @@ where keys.into_iter() .map(move |key| { - client.clone().child_storage(block, &child_info, &key).map_err(client_err) + client + .clone() + .child_storage(&BlockId::Hash(block), &child_info, &key) + .map_err(client_err) }) .collect() } @@ -602,7 +610,7 @@ 
where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_hash(block, &child_info, &key) + self.client.child_storage_hash(&BlockId::Hash(block), &child_info, &key) }) .map_err(client_err) } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 2f91648008ff7..77acdf8418ccc 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -99,7 +99,7 @@ fn api>>(sync: T) -> RpcModule> { ); }, Request::NetworkAddReservedPeer(peer, sender) => { - let _ = match sc_network_common::config::parse_str_addr(&peer) { + let _ = match sc_network::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), @@ -123,7 +123,7 @@ fn api>>(sync: T) -> RpcModule> { let _ = sender.send(SyncState { starting_block: 1, current_block: 2, - highest_block: 3, + highest_block: Some(3), }); }, }; @@ -297,7 +297,10 @@ async fn system_node_roles() { async fn system_sync_state() { let sync_state: SyncState = api(None).call("system_syncState", EmptyParams::new()).await.unwrap(); - assert_eq!(sync_state, SyncState { starting_block: 1, current_block: 2, highest_block: 3 }); + assert_eq!( + sync_state, + SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) } + ); } #[tokio::test] diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index a0c8f21effec1..e8ddf40a0ae03 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -20,21 +20,20 @@ rocksdb = ["sc-client-db/rocksdb"] wasmtime = ["sc-executor/wasmtime"] # exposes the client type test-helpers = [] -runtime-benchmarks = ["sc-client-db/runtime-benchmarks"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server"] } thiserror = "1.0.30" futures = "0.3.21" rand = "0.7.3" -parking_lot = "0.12.1" +parking_lot = "0.12.0" log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" 
-pin-project = "1.0.12" +pin-project = "1.0.10" hash-db = "0.15.2" serde = "1.0.136" -serde_json = "1.0.85" +serde_json = "1.0.79" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } sp-trie = { version = "6.0.0", path = "../../primitives/trie" } @@ -52,11 +51,9 @@ sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "6.0.0", path = "../../primitives/storage" } sc-network = { version = "0.10.0-dev", path = "../network" } -sc-network-bitswap = { version = "0.10.0-dev", path = "../network/bitswap" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } -sc-network-transactions = { version = "0.10.0-dev", path = "../network/transactions" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } @@ -69,7 +66,6 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/a sp-transaction-storage-proof = { version = "4.0.0-dev", path = "../../primitives/transaction-storage-proof" } sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } sc-rpc = { version = "4.0.0-dev", path = "../rpc" } -sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../rpc-spec-v2" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" } sc-informant = { version = "0.10.0-dev", path = "../informant" } @@ -81,14 +77,13 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" } tracing = 
"0.1.29" tracing-futures = { version = "0.2.4" } -parity-util-mem = { version = "0.12.0", default-features = false, features = [ +parity-util-mem = { version = "0.11.0", default-features = false, features = [ "primitive-types", ] } -async-trait = "0.1.57" +async-trait = "0.1.50" tokio = { version = "1.17.0", features = ["time", "rt-multi-thread", "parking_lot"] } tempfile = "3.1.0" directories = "4.0.1" -static_init = "1.0.3" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 3cb064ec814c5..ec537a33b72d5 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -38,16 +38,10 @@ use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; -use sc_network_bitswap::BitswapRequestHandler; -use sc_network_common::{ - protocol::role::Roles, - service::{NetworkStateInfo, NetworkStatusProvider}, - sync::warp::WarpSyncProvider, -}; +use sc_network_common::sync::warp::WarpSyncProvider; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, - state_request_handler::StateRequestHandler, + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync, }; use sc_rpc::{ @@ -58,7 +52,6 @@ use sc_rpc::{ system::SystemApiServer, DenyUnsafe, SubscriptionTaskExecutor, }; -use sc_rpc_spec_v2::transaction::TransactionApiServer; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; @@ -210,10 +203,11 @@ where let (client, backend) 
= { let db_config = sc_client_db::DatabaseSettings { - trie_cache_maximum_size: config.trie_cache_maximum_size, + state_cache_size: config.state_cache_size, + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - blocks_pruning: config.blocks_pruning, + keep_blocks: config.keep_blocks, }; let backend = new_db_backend(db_config)?; @@ -325,29 +319,6 @@ where ) } -/// Shared network instance implementing a set of mandatory traits. -pub trait SpawnTaskNetwork: - sc_offchain::NetworkProvider - + NetworkStateInfo - + NetworkStatusProvider - + Send - + Sync - + 'static -{ -} - -impl SpawnTaskNetwork for T -where - Block: BlockT, - T: sc_offchain::NetworkProvider - + NetworkStateInfo - + NetworkStatusProvider - + Send - + Sync - + 'static, -{ -} - /// Parameters to pass into `build`. pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// The service configuration. @@ -366,12 +337,9 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub rpc_builder: Box Result, Error>>, /// A shared network instance. - pub network: Arc>, + pub network: Arc::Hash>>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, - /// Controller for transactions handlers - pub tx_handler_controller: - sc_network_transactions::TransactionsHandlerController<::Hash>, /// Telemetry instance for this node. 
pub telemetry: Option<&'a mut Telemetry>, } @@ -381,7 +349,7 @@ pub fn build_offchain_workers( config: &Configuration, spawn_handle: SpawnTaskHandle, client: Arc, - network: Arc, + network: Arc::Hash>>, ) -> Option>> where TBl: BlockT, @@ -450,7 +418,6 @@ where rpc_builder, network, system_rpc_tx, - tx_handler_controller, telemetry, } = params; @@ -486,11 +453,7 @@ where spawn_handle.spawn( "on-transaction-imported", Some("transaction-pool"), - transaction_notifications( - transaction_pool.clone(), - tx_handler_controller, - telemetry.clone(), - ), + transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()), ); // Prometheus metrics. @@ -553,21 +516,19 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( - transaction_pool: Arc, - tx_handler_controller: sc_network_transactions::TransactionsHandlerController< - ::Hash, - >, +async fn transaction_notifications( + transaction_pool: Arc, + network: Arc::Hash>>, telemetry: Option, ) where - Block: BlockT, - ExPool: MaintainedTransactionPool::Hash>, + TBl: BlockT, + TExPool: MaintainedTransactionPool::Hash>, { // transaction notifications transaction_pool .import_notification_stream() .for_each(move |hash| { - tx_handler_controller.propagate_transaction(hash); + network.propagate_transaction(hash); let status = transaction_pool.status(); telemetry!( telemetry; @@ -581,18 +542,13 @@ async fn transaction_notifications( .await; } -fn init_telemetry( +fn init_telemetry>( config: &mut Configuration, - network: Network, - client: Arc, + network: Arc::Hash>>, + client: Arc, telemetry: &mut Telemetry, sysinfo: Option, -) -> sc_telemetry::Result -where - Block: BlockT, - Client: BlockBackend, - Network: NetworkStateInfo, -{ +) -> sc_telemetry::Result { let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); let connection_message = ConnectionMessage { name: config.network.node_name.to_owned(), @@ -675,13 +631,6 @@ where (chain, state, child_state) }; - let 
transaction_v2 = sc_rpc_spec_v2::transaction::Transaction::new( - client.clone(), - transaction_pool.clone(), - task_executor.clone(), - ) - .into_rpc(); - let author = sc_rpc::author::Author::new( client.clone(), transaction_pool, @@ -699,10 +648,6 @@ where rpc_api.merge(offchain).map_err(|e| Error::Application(e.into()))?; } - // Part of the RPC v2 spec. - rpc_api.merge(transaction_v2).map_err(|e| Error::Application(e.into()))?; - - // Part of the old RPC spec. rpc_api.merge(chain).map_err(|e| Error::Application(e.into()))?; rpc_api.merge(author).map_err(|e| Error::Application(e.into()))?; rpc_api.merge(system).map_err(|e| Error::Application(e.into()))?; @@ -733,6 +678,7 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// An optional warp sync provider. pub warp_sync: Option>>, } + /// Build the network service, the network status sinks and an RPC sender. pub fn build_network( params: BuildNetworkParams, @@ -740,7 +686,6 @@ pub fn build_network( ( Arc::Hash>>, TracingUnboundedSender>, - sc_network_transactions::TransactionsHandlerController<::Hash>, NetworkStarter, ), Error, @@ -769,8 +714,6 @@ where warp_sync, } = params; - let mut request_response_protocol_configs = Vec::new(); - if warp_sync.is_none() && config.network.sync_mode.is_warp() { return Err("Warp sync enabled, but no warp sync provider configured.".into()) } @@ -783,6 +726,9 @@ where } } + let transaction_pool_adapter = + Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }); + let protocol_id = config.protocol_id(); let block_announce_validator = if let Some(f) = block_announce_validator_builder { @@ -795,7 +741,6 @@ where // Allow both outgoing and incoming requests. 
let (handler, protocol_config) = BlockRequestHandler::new( &protocol_id, - config.chain_spec.fork_id(), client.clone(), config.network.default_peers_set.in_peers as usize + config.network.default_peers_set.out_peers as usize, @@ -808,7 +753,6 @@ where // Allow both outgoing and incoming requests. let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, - config.chain_spec.fork_id(), client.clone(), config.network.default_peers_set_num_full as usize, ); @@ -819,16 +763,8 @@ where let (warp_sync_provider, warp_sync_protocol_config) = warp_sync .map(|provider| { // Allow both outgoing and incoming requests. - let (handler, protocol_config) = WarpSyncRequestHandler::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - config.chain_spec.fork_id(), - provider.clone(), - ); + let (handler, protocol_config) = + WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); (Some(provider), Some(protocol_config)) }) @@ -836,17 +772,13 @@ where let light_client_request_protocol_config = { // Allow both outgoing and incoming requests. 
- let (handler, protocol_config) = LightClientRequestHandler::new( - &protocol_id, - config.chain_spec.fork_id(), - client.clone(), - ); + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); protocol_config }; - let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (chain_sync, chain_sync_service) = ChainSync::new( + let chain_sync = ChainSync::new( match config.network.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, SyncMode::Fast { skip_proofs, storage_chain_mode } => @@ -857,29 +789,8 @@ where block_announce_validator, config.network.max_parallel_downloads, warp_sync_provider, - chain_sync_network_handle, )?; - - let block_announce_config = chain_sync.get_block_announce_proto_config( - protocol_id.clone(), - &config.chain_spec.fork_id().map(ToOwned::to_owned), - Roles::from(&config.role.clone()), - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - ); - - request_response_protocol_configs.push(config.network.ipfs_server.then(|| { - let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); - spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler.run()); - protocol_config - })); - - let mut network_params = sc_network::config::Params { + let network_params = sc_network::config::Params { role: config.role.clone(), executor: { let spawn_handle = Clone::clone(&spawn_handle); @@ -887,57 +798,29 @@ where spawn_handle.spawn("libp2p-node", Some("networking"), fut); })) }, + transactions_handler_executor: { + let spawn_handle = Clone::clone(&spawn_handle); + Box::new(move |fut| { + spawn_handle.spawn("network-transactions-handler", Some("networking"), fut); + }) + }, network_config: config.network.clone(), chain: client.clone(), - 
protocol_id: protocol_id.clone(), - fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), + transaction_pool: transaction_pool_adapter as _, + protocol_id, import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), - chain_sync_service, metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), - block_announce_config, block_request_protocol_config, state_request_protocol_config, warp_sync_protocol_config, light_client_request_protocol_config, - request_response_protocol_configs: request_response_protocol_configs - .into_iter() - .flatten() - .collect::>(), }; - // crate transactions protocol and add it to the list of supported protocols of `network_params` - let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - config.chain_spec.fork_id(), - ); - network_params - .network_config - .extra_sets - .insert(0, transactions_handler_proto.set_config()); - let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); - let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( - network.clone(), - Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), - config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); - spawn_handle.spawn( - "chain-sync-network-service-provider", - Some("networking"), - chain_sync_network_provider.run(network.clone()), - ); - let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let future = build_network_future( @@ -985,7 +868,7 @@ where future.await }); - Ok((network, system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx))) + 
Ok((network, system_rpc_tx, NetworkStarter(network_start_tx))) } /// Object used to start the network. @@ -993,11 +876,6 @@ where pub struct NetworkStarter(oneshot::Sender<()>); impl NetworkStarter { - /// Create a new NetworkStarter - pub fn new(sender: oneshot::Sender<()>) -> Self { - NetworkStarter(sender) - } - /// Start the network. Call this after all sub-components have been initialized. /// /// > **Note**: If you don't call this function, the networking will not work. diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index ca7a070086f45..ffe91d0d7355e 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -19,20 +19,25 @@ use crate::error::Error; use sc_client_api::{StorageProvider, UsageProvider}; use sp_core::storage::{well_known_keys, ChildInfo, Storage, StorageChild, StorageKey, StorageMap}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{collections::HashMap, sync::Arc}; /// Export the raw state at the given `block`. If `block` is `None`, the /// best block will be used. 
-pub fn export_raw_state(client: Arc, hash: B::Hash) -> Result +pub fn export_raw_state( + client: Arc, + block: Option>, +) -> Result where C: UsageProvider + StorageProvider, B: BlockT, BA: sc_client_api::backend::Backend, { + let block = block.unwrap_or_else(|| BlockId::Hash(client.usage_info().chain.best_hash)); + let empty_key = StorageKey(Vec::new()); - let mut top_storage = client.storage_pairs(hash, &empty_key)?; + let mut top_storage = client.storage_pairs(&block, &empty_key)?; let mut children_default = HashMap::new(); // Remove all default child storage roots from the top storage and collect the child storage @@ -47,10 +52,10 @@ where StorageKey(key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec()); let child_info = ChildInfo::new_default(&key.0); - let keys = client.child_storage_keys(hash, &child_info, &empty_key)?; + let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; let mut pairs = StorageMap::new(); keys.into_iter().try_for_each(|k| { - if let Some(value) = client.child_storage(hash, &child_info, &k)? { + if let Some(value) = client.child_storage(&block, &child_info, &k)? { pairs.insert(k.0, value.0); } diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index 3ee4399d063b3..9a3ce6024ed92 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -40,13 +40,6 @@ where info!("There aren't any non-finalized blocks to revert."); } else { info!("Reverted {} blocks. Best: #{} ({})", reverted.0, info.best_number, info.best_hash); - - if reverted.0 > blocks { - info!( - "Number of reverted blocks is higher than requested \ - because of reverted leaves higher than the best block." 
- ) - } } Ok(()) } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index a1a012dcedd9f..1e8114df13339 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -17,17 +17,21 @@ // along with this program. If not, see . use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use codec::{Decode, Encode}; use sc_client_api::{backend, call_executor::CallExecutor, HeaderBackend}; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_api::{ProofRecorder, StorageTransactionCache}; -use sp_core::traits::{CodeExecutor, RuntimeCode, SpawnNamed}; +use sp_core::{ + traits::{CodeExecutor, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, +}; use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ - backend::AsTrieBackend, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof, }; -use std::{cell::RefCell, sync::Arc}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; /// Call executor that executes methods locally, querying all required /// data from local backend. 
@@ -147,15 +151,18 @@ where extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(at_hash)?; + let state = self.backend.state_at(*at)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, at)?; - let mut sm = StateMachine::new( + let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) + })?; + + let return_data = StateMachine::new( &state, &mut changes, &self.executor, @@ -165,17 +172,22 @@ where &runtime_code, self.spawn_handle.clone(), ) - .set_parent_hash(at_hash); + .set_parent_hash(at_hash) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + )?; - sm.execute_using_consensus_failure_handler(strategy.get_manager()) - .map_err(Into::into) + Ok(return_data.into_encoded()) } fn contextual_call< EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, at: &BlockId, @@ -184,19 +196,23 @@ where changes: &RefCell, storage_transaction_cache: Option<&RefCell>>, execution_manager: ExecutionManager, + native_call: Option, recorder: &Option>, extensions: Option, - ) -> Result, sp_blockchain::Error> + ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone, { let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(at_hash)?; + let 
state = self.backend.state_at(*at)?; let changes = &mut *changes.borrow_mut(); + let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) + })?; + // It is important to extract the runtime code here before we create the proof // recorder to not record it. We also need to fetch the runtime code from `state` to // make sure we use the caching layers. @@ -208,11 +224,15 @@ where match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend(); - - let backend = sp_state_machine::TrieBackendBuilder::wrap(&trie_state) - .with_recorder(recorder.clone()) - .build(); + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + + let backend = sp_state_machine::ProvingBackend::new_with_recorder( + trie_state, + recorder.clone(), + ); let mut state_machine = StateMachine::new( &backend, @@ -226,7 +246,10 @@ where ) .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler(execution_manager) + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) }, None => { let mut state_machine = StateMachine::new( @@ -241,7 +264,10 @@ where ) .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler(execution_manager) + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) }, } .map_err(Into::into) @@ -249,9 +275,7 @@ where fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - - let at_hash = 
self.backend.blockchain().expect_block_hash_from_id(id)?; - let state = self.backend.state_at(at_hash)?; + let state = self.backend.state_at(*id)?; let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -268,10 +292,12 @@ where method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(at_hash)?; + let state = self.backend.state_at(*at)?; - let trie_backend = state.as_trie_backend(); + let trie_backend = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 1d896d8acd8bf..d61d7f7fa3781 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -22,6 +22,7 @@ use super::{ block_rules::{BlockRules, LookupResult as BlockLookupResult}, genesis, }; +use codec::{Decode, Encode}; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; @@ -58,9 +59,12 @@ use sp_blockchain::{ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sp_core::storage::{ - well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, Storage, StorageChild, StorageData, - StorageKey, +use sp_core::{ + storage::{ + well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, Storage, StorageChild, + StorageData, StorageKey, + }, + NativeOrEncoded, }; #[cfg(feature = "test-helpers")] use sp_keystore::SyncCryptoStorePtr; @@ -81,7 +85,9 @@ use sp_trie::{CompactProof, StorageProof}; use std::{ 
collections::{hash_map::DefaultHasher, HashMap, HashSet}, marker::PhantomData, + panic::UnwindSafe, path::PathBuf, + result, sync::Arc, }; @@ -414,14 +420,13 @@ where } /// Get a reference to the state at a given block. - pub fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { - self.backend.state_at(hash) + pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { + self.backend.state_at(*block) } /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(id)?; - Ok(StorageProvider::storage(self, hash, &StorageKey(well_known_keys::CODE.to_vec()))? + Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? .expect( "None is returned if there's no value stored for the given key;\ ':code' key is always defined; qed", @@ -586,7 +591,8 @@ where Some(storage_changes) => { let storage_changes = match storage_changes { sc_consensus::StorageChanges::Changes(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, parent_hash)?; + self.backend + .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; let (main_sc, child_sc, offchain_sc, tx, _, tx_index) = storage_changes.into_inner(); @@ -641,7 +647,7 @@ where if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. - warn!("Error importing state: State root mismatch."); + warn!("Error imporing state: State root mismatch."); return Err(Error::InvalidStateRoot) } None @@ -656,7 +662,7 @@ where // Ensure parent chain is finalized to maintain invariant that finality is called // sequentially. 
- if finalized && parent_exists && info.finalized_hash != parent_hash { + if finalized && parent_exists { self.apply_finality_with_block_hash( operation, parent_hash, @@ -813,7 +819,7 @@ where Block::new(import_block.header.clone(), body.clone()), )?; - let state = self.backend.state_at(*parent_hash)?; + let state = self.backend.state_at(at)?; let gen_storage_changes = runtime_api .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; @@ -877,17 +883,17 @@ where // plugable we cannot make a better choice here. usages that need // an accurate "best" block need to go through `SelectChain` // instead. - operation.op.mark_head(block)?; + operation.op.mark_head(BlockId::Hash(block))?; } let enacted = route_from_finalized.enacted(); assert!(enacted.len() > 0); for finalize_new in &enacted[..enacted.len() - 1] { - operation.op.mark_finalized(finalize_new.hash, None)?; + operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?; } assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(block, justification)?; + operation.op.mark_finalized(BlockId::Hash(block), justification)?; if notify { let finalized = @@ -1033,7 +1039,7 @@ where }; match hash_and_number { Some((hash, number)) => - if self.backend.have_state_at(hash, number) { + if self.backend.have_state_at(&hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) @@ -1053,9 +1059,9 @@ where /// Get block body by id. pub fn body( &self, - hash: Block::Hash, + id: &BlockId, ) -> sp_blockchain::Result::Extrinsic>>> { - self.backend.blockchain().body(hash) + self.backend.blockchain().body(*id) } /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. 
@@ -1151,61 +1157,60 @@ where { fn read_proof( &self, - hash: Block::Hash, + id: &BlockId, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(hash) - .and_then(|state| prove_read(state, keys).map_err(Into::into)) + self.state_at(id).and_then(|state| prove_read(state, keys).map_err(Into::into)) } fn read_child_proof( &self, - hash: Block::Hash, + id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(hash) + self.state_at(id) .and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into)) } fn execution_proof( &self, - hash: Block::Hash, + id: &BlockId, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - self.executor.prove_execution(&BlockId::Hash(hash), method, call_data) + self.executor.prove_execution(id, method, call_data) } fn read_proof_collection( &self, - hash: Block::Hash, + id: &BlockId, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)> { - let state = self.state_at(hash)?; + let state = self.state_at(id)?; // this is a read proof, using version V0 or V1 is equivalent. let root = state.storage_root(std::iter::empty(), StateVersion::V0).0; let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>( state, size_limit, start_key, )?; - let proof = proof - .into_compact_proof::>(root) + // This is read proof only, we can use either LayoutV0 or LayoutV1. 
+ let proof = sp_trie::encode_compact::>>(proof, root) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; Ok((proof, count)) } fn storage_collection( &self, - hash: Block::Hash, + id: &BlockId, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())) } - let state = self.state_at(hash)?; + let state = self.state_at(id)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { @@ -1322,7 +1327,7 @@ where Some(&root), ) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; - let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build(); + let proving_backend = sp_state_machine::TrieBackend::new(db, root); let state = read_range_proof_check_with_child_on_proving_backend::>( &proving_backend, start_key, @@ -1398,19 +1403,19 @@ where { fn storage_keys( &self, - hash: Block::Hash, + id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let keys = self.state_at(hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); + let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } fn storage_pairs( &self, - hash: ::Hash, + id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let state = self.state_at(hash)?; + let state = self.state_at(id)?; let keys = state .keys(&key_prefix.0) .into_iter() @@ -1424,34 +1429,34 @@ where fn storage_keys_iter<'a>( &self, - hash: ::Hash, + id: &BlockId, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let state = self.state_at(hash)?; + let state = self.state_at(id)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new(state, prefix, start_key)) } fn child_storage_keys_iter<'a>( &self, - 
hash: ::Hash, + id: &BlockId, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let state = self.state_at(hash)?; + let state = self.state_at(id)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new_child(state, child_info, prefix, start_key)) } fn storage( &self, - hash: Block::Hash, + id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { Ok(self - .state_at(hash)? + .state_at(id)? .storage(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1459,22 +1464,22 @@ where fn storage_hash( &self, - hash: ::Hash, + id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - self.state_at(hash)? + self.state_at(id)? .storage_hash(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } fn child_storage_keys( &self, - hash: ::Hash, + id: &BlockId, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { let keys = self - .state_at(hash)? + .state_at(id)? .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) @@ -1484,12 +1489,12 @@ where fn child_storage( &self, - hash: ::Hash, + id: &BlockId, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { Ok(self - .state_at(hash)? + .state_at(id)? .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1497,11 +1502,11 @@ where fn child_storage_hash( &self, - hash: ::Hash, + id: &BlockId, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { - self.state_at(hash)? + self.state_at(id)? 
.child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } @@ -1654,23 +1659,27 @@ where { type StateBackend = B::State; - fn call_api_at( + fn call_api_at< + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( &self, - params: CallApiAtParams, - ) -> Result, sp_api::ApiError> { + params: CallApiAtParams, + ) -> Result, sp_api::ApiError> { let at = params.at; let (manager, extensions) = self.execution_extensions.manager_and_extensions(at, params.context); self.executor - .contextual_call( + .contextual_call:: _, _, _>( at, params.function, ¶ms.arguments, params.overlayed_changes, Some(params.storage_transaction_cache), manager, + params.native_call, params.recorder, Some(extensions), ) @@ -1680,11 +1689,6 @@ where fn runtime_version_at(&self, at: &BlockId) -> Result { CallExecutor::runtime_version(&self.executor, at).map_err(Into::into) } - - fn state_at(&self, at: &BlockId) -> Result { - let hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - self.state_at(hash).map_err(Into::into) - } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport @@ -1844,22 +1848,29 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - hash: Block::Hash, + id: BlockId, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { let last_best = self.backend.blockchain().info().best_hash; - self.apply_finality_with_block_hash(operation, hash, justification, last_best, notify) + let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.apply_finality_with_block_hash( + operation, + to_finalize_hash, + justification, + last_best, + notify, + ) } fn finalize_block( &self, - hash: Block::Hash, + id: BlockId, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { self.lock_import_and_run(|operation| { - self.apply_finality(operation, hash, justification, notify) + 
self.apply_finality(operation, id, justification, notify) }) } } @@ -1873,20 +1884,20 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - hash: Block::Hash, + id: BlockId, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { - (**self).apply_finality(operation, hash, justification, notify) + (**self).apply_finality(operation, id, justification, notify) } fn finalize_block( &self, - hash: Block::Hash, + id: BlockId, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { - (**self).finalize_block(hash, justification, notify) + (**self).finalize_block(id, justification, notify) } } @@ -1939,22 +1950,16 @@ where { fn block_body( &self, - hash: Block::Hash, + id: &BlockId, ) -> sp_blockchain::Result::Extrinsic>>> { - self.body(hash) + self.body(id) } fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { - Ok(match self.header(id)? { - Some(header) => { - let hash = header.hash(); - match (self.body(hash)?, self.justifications(hash)?) { - (Some(extrinsics), justifications) => - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), - _ => None, - } - }, - None => None, + Ok(match (self.header(id)?, self.body(id)?, self.justifications(id)?) 
{ + (Some(header), Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), + _ => None, }) } @@ -1962,24 +1967,27 @@ where Client::block_status(self, id) } - fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { - self.backend.blockchain().justifications(hash) + fn justifications(&self, id: &BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().justifications(*id) } fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { self.backend.blockchain().hash(number) } - fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>> { self.backend.blockchain().indexed_transaction(hash) } - fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { + fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { self.backend.blockchain().has_indexed_transaction(hash) } - fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>> { - self.backend.blockchain().block_indexed_body(hash) + fn block_indexed_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result>>> { + self.backend.blockchain().block_indexed_body(*id) } fn requires_full_sync(&self) -> bool { @@ -2070,19 +2078,9 @@ where &self, number: NumberFor, ) -> Result>>, sp_transaction_storage_proof::Error> { - let hash = match self - .backend - .blockchain() - .block_hash_from_id(&BlockId::Number(number)) - .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))? 
- { - Some(hash) => hash, - None => return Ok(None), - }; - self.backend .blockchain() - .block_indexed_body(hash) + .block_indexed_body(BlockId::number(number)) .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index bca0697bcbd08..0eeb6e05cee16 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -19,16 +19,19 @@ //! Service configuration. pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; -pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; +pub use sc_client_db::{Database, DatabaseSource, KeepBlocks, PruningMode}; pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; pub use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig, Role}, + config::{ + MultiaddrWithPeerId, NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, + SetConfig, TransportConfig, + }, Multiaddr, }; pub use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, + config::ProtocolId, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -67,16 +70,16 @@ pub struct Configuration { pub keystore_remote: Option, /// Configuration for the database. pub database: DatabaseSource, - /// Maximum size of internal trie cache in bytes. - /// - /// If `None` is given the cache is disabled. - pub trie_cache_maximum_size: Option, + /// Size of internal state cache in Bytes + pub state_cache_size: usize, + /// Size in percent of cache size dedicated to child tries + pub state_cache_child_ratio: Option, /// State pruning settings. pub state_pruning: Option, /// Number of blocks to keep in the db. /// /// NOTE: only finalized blocks are subject for removal! 
- pub blocks_pruning: BlocksPruning, + pub keep_blocks: KeepBlocks, /// Chain configuration. pub chain_spec: Box, /// Wasm execution method. @@ -260,43 +263,31 @@ impl Default for RpcMethods { } } -#[static_init::dynamic(drop, lazy)] -static mut BASE_PATH_TEMP: Option = None; - -/// The base path that is used for everything that needs to be written on disk to run a node. +/// The base path that is used for everything that needs to be write on disk to run a node. #[derive(Debug)] -pub struct BasePath { - path: PathBuf, +pub enum BasePath { + /// A temporary directory is used as base path and will be deleted when dropped. + Temporary(TempDir), + /// A path on the disk. + Permanenent(PathBuf), } impl BasePath { /// Create a `BasePath` instance using a temporary directory prefixed with "substrate" and use /// it as base path. /// - /// Note: The temporary directory will be created automatically and deleted when the program - /// exits. Every call to this function will return the same path for the lifetime of the - /// program. + /// Note: the temporary directory will be created automatically and deleted when the `BasePath` + /// instance is dropped. pub fn new_temp_dir() -> io::Result { - let mut temp = BASE_PATH_TEMP.write(); - - match &*temp { - Some(p) => Ok(Self::new(p.path())), - None => { - let temp_dir = tempfile::Builder::new().prefix("substrate").tempdir()?; - let path = PathBuf::from(temp_dir.path()); - - *temp = Some(temp_dir); - Ok(Self::new(path)) - }, - } + Ok(BasePath::Temporary(tempfile::Builder::new().prefix("substrate").tempdir()?)) } /// Create a `BasePath` instance based on an existing path on disk. /// /// Note: this function will not ensure that the directory exist nor create the directory. It /// will also not delete the directory when the instance is dropped. 
- pub fn new>(path: P) -> BasePath { - Self { path: path.into() } + pub fn new>(path: P) -> BasePath { + BasePath::Permanenent(path.as_ref().to_path_buf()) } /// Create a base path from values describing the project. @@ -310,7 +301,10 @@ impl BasePath { /// Retrieve the base path. pub fn path(&self) -> &Path { - &self.path + match self { + BasePath::Temporary(temp_dir) => temp_dir.path(), + BasePath::Permanenent(path) => path.as_path(), + } } /// Returns the configuration directory inside this base path. diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 001a83922d776..0d702c7f37b98 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -19,6 +19,7 @@ //! Errors that can occur during the service operation. use sc_keystore; +use sc_network; use sp_blockchain; use sp_consensus; @@ -40,7 +41,7 @@ pub enum Error { Consensus(#[from] sp_consensus::Error), #[error(transparent)] - Network(#[from] sc_network_common::error::Error), + Network(#[from] sc_network::error::Error), #[error(transparent)] Keystore(#[from] sc_keystore::Error), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 091b4bbe9fe5f..98bcb17174157 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,11 +42,9 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; -use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock}; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; -use sp_consensus::SyncOracle; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, @@ -62,7 +60,7 @@ pub use self::{ error::Error, }; pub use config::{ - BasePath, BlocksPruning, Configuration, DatabaseSource, PruningMode, Role, RpcMethods, TaskType, + BasePath, Configuration, 
DatabaseSource, KeepBlocks, PruningMode, Role, RpcMethods, TaskType, }; pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, @@ -72,7 +70,7 @@ pub use sc_chain_spec::{ pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use sc_network_transactions::config::{TransactionImport, TransactionImportFuture}; +pub use sc_network::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::{ RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, }; @@ -148,7 +146,7 @@ async fn build_network_future< + Send + Sync + 'static, - H: sc_network_common::ExHashT, + H: sc_network::ExHashT, >( role: Role, mut network: sc_network::NetworkWorker, @@ -230,15 +228,8 @@ async fn build_network_future< } } sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { - let result = match MultiaddrWithPeerId::try_from(peer_addr) { - Ok(peer) => { - network.add_reserved_peer(peer) - }, - Err(err) => { - Err(err.to_string()) - }, - }; - let x = result.map_err(sc_rpc::system::error::Error::MalformattedPeerArg); + let x = network.add_reserved_peer(peer_addr) + .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); let _ = sender.send(x); } sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { @@ -273,12 +264,10 @@ async fn build_network_future< sc_rpc::system::Request::SyncState(sender) => { use sc_rpc::system::SyncState; - let best_number = client.info().best_number; - let _ = sender.send(SyncState { starting_block, - current_block: best_number, - highest_block: network.best_seen_block().unwrap_or(best_number), + current_block: client.info().best_number, + highest_block: network.best_seen_block(), }); } } @@ -415,8 +404,7 @@ where .collect() } -impl sc_network_transactions::config::TransactionPool - for TransactionPoolAdapter +impl sc_network::config::TransactionPool for TransactionPoolAdapter where C: HeaderBackend 
+ BlockBackend diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 13b249a7b9563..ef3132f61ab99 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -22,8 +22,7 @@ use crate::config::Configuration; use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; -use sc_network::config::Role; -use sc_network_common::service::{NetworkStatus, NetworkStatusProvider}; +use sc_network::{config::Role, NetworkService, NetworkStatus}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; @@ -183,16 +182,15 @@ impl MetricsService { /// Returns a never-ending `Future` that performs the /// metric and telemetry updates with information from /// the given sources. - pub async fn run( + pub async fn run( mut self, client: Arc, transactions: Arc, - network: TNet, + network: Arc::Hash>>, ) where TBl: Block, TCl: ProvideRuntimeApi + UsageProvider, TExPool: MaintainedTransactionPool::Hash>, - TNet: NetworkStatusProvider, { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 1f934a6e5355f..d003db57eb7ac 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -12,12 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" fdlimit = "0.2.1" futures = "0.3.21" +hex = "0.4" +hex-literal = "0.3.4" log = "0.4.17" parity-scale-codec = "3.0.0" -parking_lot = "0.12.1" +parking_lot = "0.12.0" tempfile = "3.1.0" tokio = { version = "1.17.0", features = ["time"] } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } @@ -26,7 +27,6 @@ sc-client-db = { version = 
"0.10.0-dev", default-features = false, path = "../.. sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../executor" } sc-network = { version = "0.10.0-dev", path = "../../network" } -sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 788f119130ac0..136efad088fae 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -17,12 +17,13 @@ // along with this program. If not, see . use futures::executor::block_on; +use hex_literal::hex; use parity_scale_codec::{Decode, Encode, Joiner}; use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ - in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, HeaderBackend, StorageProvider, + in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, }; -use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; +use sc_client_db::{Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode}; use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; @@ -154,9 +155,7 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); @@ -884,11 +872,11 @@ fn import_with_justification() { assert_eq!(client.chain_info().finalized_hash, a3.hash()); - assert_eq!(client.justifications(a3.hash()).unwrap(), Some(justification)); + assert_eq!(client.justifications(&BlockId::Hash(a3.hash())).unwrap(), Some(justification)); - 
assert_eq!(client.justifications(a1.hash()).unwrap(), None); + assert_eq!(client.justifications(&BlockId::Hash(a1.hash())).unwrap(), None); - assert_eq!(client.justifications(a2.hash()).unwrap(), None); + assert_eq!(client.justifications(&BlockId::Hash(a2.hash())).unwrap(), None); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); @@ -999,7 +987,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // we finalize block B1 which is on a different branch from current best // which should trigger a re-org. - ClientExt::finalize_block(&client, b1.hash(), None).unwrap(); + ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); // B1 should now be the latest finalized assert_eq!(client.chain_info().finalized_hash, b1.hash()); @@ -1023,7 +1011,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { assert_eq!(client.chain_info().best_hash, b3.hash()); - ClientExt::finalize_block(&client, b3.hash(), None).unwrap(); + ClientExt::finalize_block(&client, BlockId::Hash(b3.hash()), None).unwrap(); finality_notification_check(&mut finality_notifications, &[b1.hash()], &[]); finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[a2.hash()]); @@ -1121,7 +1109,7 @@ fn finality_notifications_content() { // Postpone import to test behavior of import of finalized block. 
- ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); + ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); // Import and finalize D4 block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); @@ -1131,14 +1119,6 @@ fn finality_notifications_content() { assert!(finality_notifications.try_next().is_err()); } -#[test] -fn get_block_by_bad_block_hash_returns_none() { - let client = substrate_test_runtime_client::new(); - - let hash = H256::from_low_u64_be(5); - assert!(client.block(&BlockId::Hash(hash)).unwrap().is_none()); -} - #[test] fn get_header_by_block_number_doesnt_panic() { let client = substrate_test_runtime_client::new(); @@ -1217,9 +1197,10 @@ fn doesnt_import_blocks_that_revert_finality() { let backend = Arc::new( Backend::new( DatabaseSettings { - trie_cache_maximum_size: Some(1 << 20), + state_cache_size: 1 << 20, + state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::KeepAll, + keep_blocks: KeepBlocks::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1285,7 +1266,7 @@ fn doesnt_import_blocks_that_revert_finality() { // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesn't include it - ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); + ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = @@ -1320,7 +1301,7 @@ fn doesnt_import_blocks_that_revert_finality() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); - ClientExt::finalize_block(&client, a3.hash(), None).unwrap(); + ClientExt::finalize_block(&client, BlockId::Hash(a3.hash()), None).unwrap(); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); @@ -1352,9 +1333,9 @@ fn respects_block_rules() 
{ .block; let params = BlockCheckParams { - hash: block_ok.hash(), + hash: block_ok.hash().clone(), number: 0, - parent_hash: *block_ok.header().parent_hash(), + parent_hash: block_ok.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1368,9 +1349,9 @@ fn respects_block_rules() { let block_not_ok = block_not_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_not_ok.hash(), + hash: block_not_ok.hash().clone(), number: 0, - parent_hash: *block_not_ok.header().parent_hash(), + parent_hash: block_not_ok.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1391,15 +1372,15 @@ fn respects_block_rules() { let block_ok = block_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_ok.hash(), + hash: block_ok.hash().clone(), number: 1, - parent_hash: *block_ok.header().parent_hash(), + parent_hash: block_ok.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, }; if record_only { - fork_rules.push((1, block_ok.hash())); + fork_rules.push((1, block_ok.hash().clone())); } assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); @@ -1410,9 +1391,9 @@ fn respects_block_rules() { let block_not_ok = block_not_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_not_ok.hash(), + hash: block_not_ok.hash().clone(), number: 1, - parent_hash: *block_not_ok.header().parent_hash(), + parent_hash: block_not_ok.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1443,9 +1424,10 @@ fn returns_status_for_pruned_blocks() { let backend = Arc::new( Backend::new( DatabaseSettings { - trie_cache_maximum_size: Some(1 << 20), - state_pruning: Some(PruningMode::blocks_pruning(1)), - blocks_pruning: BlocksPruning::KeepFinalized, + state_cache_size: 1 << 20, + 
state_cache_child_ratio: None, + state_pruning: Some(PruningMode::keep_blocks(1)), + keep_blocks: KeepBlocks::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1475,9 +1457,9 @@ fn returns_status_for_pruned_blocks() { let b1 = b1.build().unwrap().block; let check_block_a1 = BlockCheckParams { - hash: a1.hash(), + hash: a1.hash().clone(), number: 0, - parent_hash: *a1.header().parent_hash(), + parent_hash: a1.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1512,9 +1494,9 @@ fn returns_status_for_pruned_blocks() { block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { - hash: a2.hash(), + hash: a2.hash().clone(), number: 1, - parent_hash: *a1.header().parent_hash(), + parent_hash: a1.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1546,9 +1528,9 @@ fn returns_status_for_pruned_blocks() { block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { - hash: a3.hash(), + hash: a3.hash().clone(), number: 2, - parent_hash: *a2.header().parent_hash(), + parent_hash: a2.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1581,9 +1563,9 @@ fn returns_status_for_pruned_blocks() { ); let mut check_block_b1 = BlockCheckParams { - hash: b1.hash(), + hash: b1.hash().clone(), number: 0, - parent_hash: *b1.header().parent_hash(), + parent_hash: b1.header().parent_hash().clone(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1613,42 +1595,36 @@ fn storage_keys_iter_prefix_and_start_key_works() { .add_extra_child_storage(&child_info, b"third".to_vec(), vec![0u8; 32]) .build(); - let block_hash = client.info().best_hash; - let child_root = b":child_storage:default:child".to_vec(); - let 
prefix = StorageKey(array_bytes::hex2bytes_unchecked("3a")); + let prefix = StorageKey(hex!("3a").to_vec()); let child_prefix = StorageKey(b"sec".to_vec()); let res: Vec<_> = client - .storage_keys_iter(block_hash, Some(&prefix), None) + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() .map(|x| x.0) .collect(); assert_eq!( res, - [ - child_root.clone(), - array_bytes::hex2bytes_unchecked("3a636f6465"), - array_bytes::hex2bytes_unchecked("3a686561707061676573"), - ] + [child_root.clone(), hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec(),] ); let res: Vec<_> = client .storage_keys_iter( - block_hash, + &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), + Some(&StorageKey(hex!("3a636f6465").to_vec())), ) .unwrap() .map(|x| x.0) .collect(); - assert_eq!(res, [array_bytes::hex2bytes_unchecked("3a686561707061676573")]); + assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); let res: Vec<_> = client .storage_keys_iter( - block_hash, + &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a686561707061676573"))), + Some(&StorageKey(hex!("3a686561707061676573").to_vec())), ) .unwrap() .map(|x| x.0) @@ -1656,7 +1632,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { assert_eq!(res, Vec::>::new()); let res: Vec<_> = client - .child_storage_keys_iter(block_hash, child_info.clone(), Some(&child_prefix), None) + .child_storage_keys_iter(&BlockId::Number(0), child_info.clone(), Some(&child_prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1664,7 +1640,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .child_storage_keys_iter( - block_hash, + &BlockId::Number(0), child_info, None, Some(&StorageKey(b"second".to_vec())), @@ -1679,15 +1655,13 @@ fn storage_keys_iter_prefix_and_start_key_works() { fn storage_keys_iter_works() { let client = substrate_test_runtime_client::new(); - let block_hash = 
client.info().best_hash; - - let prefix = StorageKey(array_bytes::hex2bytes_unchecked("")); + let prefix = StorageKey(hex!("").to_vec()); let res: Vec<_> = client - .storage_keys_iter(block_hash, Some(&prefix), None) + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() - .take(9) - .map(|x| array_bytes::bytes2hex("", &x.0)) + .take(8) + .map(|x| hex::encode(&x.0)) .collect(); assert_eq!( res, @@ -1698,7 +1672,6 @@ fn storage_keys_iter_works() { "1a560ecfd2a62c2b8521ef149d0804eb621050e3988ed97dca55f0d7c3e6aa34", "1d66850d32002979d67dd29dc583af5b2ae2a1f71c1f35ad90fff122be7a3824", "237498b98d8803334286e9f0483ef513098dd3c1c22ca21c4dc155b4ef6cc204", - "26aa394eea5630e07c48ae0c9558cef75e0621c4869aa60c02be9adcc98a0d1d", "29b9db10ec5bf7907d8f74b5e60aa8140c4fbdd8127a1ee5600cb98e5ec01729", "3a636f6465", ] @@ -1706,13 +1679,13 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - block_hash, + &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), + Some(&StorageKey(hex!("3a636f6465").to_vec())), ) .unwrap() .take(7) - .map(|x| array_bytes::bytes2hex("", &x.0)) + .map(|x| hex::encode(&x.0)) .collect(); assert_eq!( res, @@ -1723,30 +1696,30 @@ fn storage_keys_iter_works() { "5c2d5fda66373dabf970e4fb13d277ce91c5233473321129d32b5a8085fa8133", "6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081", "66484000ed3f75c95fc7b03f39c20ca1e1011e5999278247d3b2f5e3c3273808", - "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", + "79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d", ] ); let res: Vec<_> = client .storage_keys_iter( - block_hash, + &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(array_bytes::hex2bytes_unchecked( - "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", - ))), + Some(&StorageKey( + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + )), ) .unwrap() .take(5) - .map(|x| 
array_bytes::bytes2hex("", &x.0)) + .map(|x| hex::encode(x.0)) .collect(); assert_eq!( res, [ + "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", "811ecfaadcf5f2ee1d67393247e2f71a1662d433e8ce7ff89fb0d4aa9561820b", "a93d74caa7ec34ea1b04ce1e5c090245f867d333f0f88278a451e45299654dc5", "a9ee1403384afbfc13f13be91ff70bfac057436212e53b9733914382ac942892", "cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f", - "e3b47b6c84c0493481f97c5197d2554f", ] ); } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 5d29d34a3cbf2..749c83c6eeac7 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -22,16 +22,15 @@ use futures::{task::Poll, Future, TryFutureExt as _}; use log::{debug, info}; use parking_lot::Mutex; use sc_client_api::{Backend, CallExecutor}; -use sc_network::{config::NetworkConfiguration, multiaddr}; -use sc_network_common::{ - config::{MultiaddrWithPeerId, TransportConfig}, - service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, +use sc_network::{ + config::{NetworkConfiguration, TransportConfig}, + multiaddr, Multiaddr, }; use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, - BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, - RuntimeGenesis, SpawnTaskHandle, TaskManager, + ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis, + SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; use sp_api::BlockId; @@ -49,8 +48,8 @@ const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, - full_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, + authority_nodes: Vec<(usize, F, U, Multiaddr)>, + full_nodes: Vec<(usize, F, U, Multiaddr)>, chain_spec: GenericChainSpec, base_port: u16, nodes: usize, @@ -232,9 +231,10 @@ fn node_config< keystore_remote: 
Default::default(), keystore: KeystoreConfig::Path { path: root.join("key"), password: None }, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - trie_cache_maximum_size: Some(16 * 1024 * 1024), + state_cache_size: 16777216, + state_cache_child_ratio: None, state_pruning: Default::default(), - blocks_pruning: BlocksPruning::KeepFinalized, + keep_blocks: KeepBlocks::All, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), @@ -320,7 +320,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() }; + addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -340,7 +340,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() }; + addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -382,12 +382,12 @@ where for (_, service, _, _) in network.full_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.clone()) + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full(move |_index, service| { - let connected = service.network().sync_num_connected(); + let connected = service.network().num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); @@ -414,7 +414,7 @@ where if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { service .network() - .add_reserved_peer(address) + .add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = 
node_id.clone(); } @@ -422,7 +422,7 @@ where } network.run_until_all_full(move |_index, service| { - let connected = service.network().sync_num_connected(); + let connected = service.network().num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); @@ -479,7 +479,7 @@ pub fn sync( for (_, service, _, _) in network.full_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.clone()) + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } @@ -532,13 +532,13 @@ pub fn consensus( for (_, service, _, _) in network.full_nodes.iter() { service .network() - .add_reserved_peer(first_address.clone()) + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.clone()) + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full(|_index, service| { @@ -556,7 +556,7 @@ pub fn consensus( for (_, service, _, _) in network.full_nodes.iter() { service .network() - .add_reserved_peer(first_address.clone()) + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 7f9a502aef8e9..08856ce3f48a9 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } log = "0.4.17" -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" -parking_lot = "0.12.1" +parking_lot = "0.12.0" sc-client-api = { version = 
"4.0.0-dev", path = "../api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 01a198a1b3c1e..d5cca9a342187 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -51,7 +51,7 @@ use log::trace; use noncanonical::NonCanonicalOverlay; use parity_util_mem::{malloc_size, MallocSizeOf}; use parking_lot::RwLock; -use pruning::{HaveBlock, RefWindow}; +use pruning::RefWindow; use sc_client_api::{MemorySize, StateDbMemoryInfo}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -62,7 +62,6 @@ const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical"; const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained"; -pub(crate) const DEFAULT_MAX_BLOCK_CONSTRAINT: u32 = 256; /// Database value type. pub type DBValue = Vec; @@ -116,14 +115,12 @@ pub trait NodeDb { } /// Error type. -#[derive(Eq, PartialEq)] pub enum Error { /// Database backend error. Db(E), StateDb(StateDbError), } -#[derive(Eq, PartialEq)] pub enum StateDbError { /// `Codec` decoding error. 
Decoding(codec::Error), @@ -141,10 +138,6 @@ pub enum StateDbError { BlockAlreadyExists, /// Invalid metadata Metadata(String), - /// Trying to get a block record from db while it is not commit to db yet - BlockUnavailable, - /// Block record is missing from the pruning window - BlockMissing, } impl From for Error { @@ -189,9 +182,6 @@ impl fmt::Debug for StateDbError { Self::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), Self::BlockAlreadyExists => write!(f, "Block already exists"), Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), - Self::BlockUnavailable => - write!(f, "Trying to get a block record from db while it is not commit to db yet"), - Self::BlockMissing => write!(f, "Block record is missing from the pruning window"), } } } @@ -237,7 +227,7 @@ pub enum PruningMode { impl PruningMode { /// Create a mode that keeps given number of blocks. - pub fn blocks_pruning(n: u32) -> PruningMode { + pub fn keep_blocks(n: u32) -> PruningMode { PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) } @@ -276,7 +266,7 @@ impl Default for PruningMode { impl Default for Constraints { fn default() -> Self { - Self { max_blocks: Some(DEFAULT_MAX_BLOCK_CONSTRAINT), max_mem: None } + Self { max_blocks: Some(256), max_mem: None } } } @@ -286,42 +276,38 @@ fn to_meta_key(suffix: &[u8], data: &S) -> Vec { buffer } -pub struct StateDbSync { +struct StateDbSync { mode: PruningMode, non_canonical: NonCanonicalOverlay, - pruning: Option>, + pruning: Option>, pinned: HashMap, - ref_counting: bool, } -impl - StateDbSync -{ - fn new( +impl StateDbSync { + fn new( mode: PruningMode, ref_counting: bool, - db: D, - ) -> Result, Error> { + db: &D, + ) -> Result, Error> { trace!(target: "state-db", "StateDb settings: {:?}. 
Ref-counting: {}", mode, ref_counting); - let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(&db)?; - let pruning: Option> = match mode { + let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(db)?; + let pruning: Option> = match mode { PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(), - PruningMode::Constrained(Constraints { max_blocks, .. }) => - Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?), + PruningMode::Constrained(_) => Some(RefWindow::new(db, ref_counting)?), PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; - Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default(), ref_counting }) + Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default() }) } - fn insert_block( + fn insert_block( &mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet, - ) -> Result, Error> { + ) -> Result, Error> { match self.mode { PruningMode::ArchiveAll => { changeset.deleted.clear(); @@ -335,23 +321,25 @@ impl } } - fn canonicalize_block(&mut self, hash: &BlockHash) -> Result, Error> { - // NOTE: it is important that the change to `LAST_CANONICAL` (emit from - // `non_canonical.canonicalize`) and the insert of the new pruning journal (emit from - // `pruning.note_canonical`) are collected into the same `CommitSet` and are committed to - // the database atomically to keep their consistency when restarting the node + fn canonicalize_block( + &mut self, + hash: &BlockHash, + ) -> Result, Error> { let mut commit = CommitSet::default(); if self.mode == PruningMode::ArchiveAll { return Ok(commit) } - let number = self.non_canonical.canonicalize(hash, &mut commit)?; - if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); - } + match self.non_canonical.canonicalize(hash, &mut commit) { + Ok(()) => + if self.mode == PruningMode::ArchiveCanonical { + commit.data.deleted.clear(); + }, + Err(e) => return 
Err(e.into()), + }; if let Some(ref mut pruning) = self.pruning { - pruning.note_canonical(hash, number, &mut commit)?; + pruning.note_canonical(hash, &mut commit); } - self.prune(&mut commit)?; + self.prune(&mut commit); Ok(commit) } @@ -359,31 +347,22 @@ impl self.non_canonical.last_canonicalized_block_number() } - fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { + fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { match self.mode { - PruningMode::ArchiveAll => IsPruned::NotPruned, + PruningMode::ArchiveAll => false, PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { if self.best_canonical().map(|c| number > c).unwrap_or(true) { - if self.non_canonical.have_block(hash) { - IsPruned::NotPruned - } else { - IsPruned::Pruned - } + !self.non_canonical.have_block(hash) } else { - match self.pruning.as_ref() { - None => IsPruned::NotPruned, - Some(pruning) => match pruning.have_block(hash, number) { - HaveBlock::No => IsPruned::Pruned, - HaveBlock::Yes => IsPruned::NotPruned, - HaveBlock::Maybe => IsPruned::MaybePruned, - }, - } + self.pruning.as_ref().map_or(false, |pruning| { + number < pruning.pending() || !pruning.have_block(hash) + }) } }, } } - fn prune(&mut self, commit: &mut CommitSet) -> Result<(), Error> { + fn prune(&mut self, commit: &mut CommitSet) { if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { @@ -397,23 +376,12 @@ impl } let pinned = &self.pinned; - match pruning.next_hash() { - // the block record is temporary unavailable, break and try next time - Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, - res => - if res?.map_or(false, |h| pinned.contains_key(&h)) { - break - }, - } - match pruning.prune_one(commit) { - // this branch should not reach as previous `next_hash` don't return error - // keeping it for robustness - Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, - res => res?, + if 
pruning.next_hash().map_or(false, |h| pinned.contains_key(&h)) { + break } + pruning.prune_one(commit); } } - Ok(()) } /// Revert all non-canonical blocks with the best block number. @@ -435,22 +403,13 @@ impl } } - fn pin(&mut self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> - where - F: Fn() -> bool, - { + fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> { match self.mode { PruningMode::ArchiveAll => Ok(()), PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - let have_block = self.non_canonical.have_block(hash) || - self.pruning.as_ref().map_or(false, |pruning| { - match pruning.have_block(hash, number) { - HaveBlock::No => false, - HaveBlock::Yes => true, - HaveBlock::Maybe => hint(), - } - }); - if have_block { + if self.non_canonical.have_block(hash) || + self.pruning.as_ref().map_or(false, |pruning| pruning.have_block(hash)) + { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { trace!(target: "state-db-pin", "Pinned block: {:?}", hash); @@ -481,13 +440,13 @@ impl } } - pub fn get( + pub fn get( &self, key: &Q, - db: &DB, - ) -> Result, Error> + db: &D, + ) -> Result, Error> where - Q: AsRef, + Q: AsRef, Key: std::borrow::Borrow, Q: std::hash::Hash + Eq, { @@ -497,10 +456,33 @@ impl db.get(key.as_ref()).map_err(Error::Db) } + fn apply_pending(&mut self) { + self.non_canonical.apply_pending(); + if let Some(pruning) = &mut self.pruning { + pruning.apply_pending(); + } + trace!( + target: "forks", + "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", + self.pruning.as_ref().and_then(|p| p.next_hash()), + self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0), + self.non_canonical.last_canonicalized_hash(), + self.non_canonical.last_canonicalized_block_number().unwrap_or(0), + self.non_canonical.top_level(), + ); + } + + fn revert_pending(&mut self) { + if let Some(pruning) = &mut self.pruning { + pruning.revert_pending(); + } + self.non_canonical.revert_pending(); + } + fn 
memory_info(&self) -> StateDbMemoryInfo { StateDbMemoryInfo { non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), - pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(&p))), + pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(p))), pinned: MemorySize::from_bytes(malloc_size(&self.pinned)), } } @@ -508,21 +490,22 @@ impl /// State DB maintenance. See module description. /// Can be shared across threads. -pub struct StateDb { - db: RwLock>, +pub struct StateDb { + db: RwLock>, } -impl - StateDb -{ +impl StateDb { /// Create an instance of [`StateDb`]. - pub fn open( - db: D, + pub fn open( + db: &D, requested_mode: Option, ref_counting: bool, should_init: bool, - ) -> Result<(CommitSet, StateDb), Error> { - let stored_mode = fetch_stored_pruning_mode(&db)?; + ) -> Result<(CommitSet, StateDb), Error> + where + D: MetaDb, + { + let stored_mode = fetch_stored_pruning_mode(db)?; let selected_mode = match (should_init, stored_mode, requested_mode) { (true, stored_mode, requested_mode) => { @@ -565,28 +548,27 @@ impl } /// Add a new non-canonical block. - pub fn insert_block( + pub fn insert_block( &self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet, - ) -> Result, Error> { + ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } /// Finalize a previously inserted block. - pub fn canonicalize_block(&self, hash: &BlockHash) -> Result, Error> { + pub fn canonicalize_block( + &self, + hash: &BlockHash, + ) -> Result, Error> { self.db.write().canonicalize_block(hash) } /// Prevents pruning of specified block and its descendants. 
- /// `hint` used for futher checking if the given block exists - pub fn pin(&self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> - where - F: Fn() -> bool, - { - self.db.write().pin(hash, number, hint) + pub fn pin(&self, hash: &BlockHash) -> Result<(), PinError> { + self.db.write().pin(hash) } /// Allows pruning of specified block. @@ -595,13 +577,13 @@ impl } /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get( + pub fn get( &self, key: &Q, - db: &DB, - ) -> Result, Error> + db: &D, + ) -> Result, Error> where - Q: AsRef, + Q: AsRef, Key: std::borrow::Borrow, Q: std::hash::Hash + Eq, { @@ -627,15 +609,18 @@ impl } /// Check if block is pruned away. - pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { + pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { return self.db.read().is_pruned(hash, number) } - /// Reset in-memory changes to the last disk-backed state. - pub fn reset(&self, db: D) -> Result<(), Error> { - let mut state_db = self.db.write(); - *state_db = StateDbSync::new(state_db.mode.clone(), state_db.ref_counting, db)?; - Ok(()) + /// Apply all pending changes + pub fn apply_pending(&self) { + self.db.write().apply_pending(); + } + + /// Revert all pending changes + pub fn revert_pending(&self) { + self.db.write().revert_pending(); } /// Returns the current memory statistics of this instance. @@ -644,17 +629,6 @@ impl } } -/// The result return by `StateDb::is_pruned` -#[derive(Debug, PartialEq, Eq)] -pub enum IsPruned { - /// Definitely pruned - Pruned, - /// Definitely not pruned - NotPruned, - /// May or may not pruned, need futher checking - MaybePruned, -} - fn fetch_stored_pruning_mode(db: &D) -> Result, Error> { let meta_key_mode = to_meta_key(PRUNING_MODE, &()); if let Some(stored_mode) = db.get_meta(&meta_key_mode).map_err(Error::Db)? 
{ @@ -690,19 +664,20 @@ fn choose_pruning_mode( mod tests { use crate::{ test::{make_changeset, make_db, TestDb}, - Constraints, Error, IsPruned, PruningMode, StateDb, StateDbError, + Constraints, Error, PruningMode, StateDb, StateDbError, }; use sp_core::H256; + use std::io; - fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { + fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); let (state_db_init, state_db) = - StateDb::open(db.clone(), Some(settings), false, true).unwrap(); + StateDb::open(&mut db, Some(settings), false, true).unwrap(); db.commit(&state_db_init); db.commit( &state_db - .insert_block( + .insert_block::( &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), @@ -712,7 +687,7 @@ mod tests { ); db.commit( &state_db - .insert_block( + .insert_block::( &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), @@ -722,7 +697,7 @@ mod tests { ); db.commit( &state_db - .insert_block( + .insert_block::( &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), @@ -732,7 +707,7 @@ mod tests { ); db.commit( &state_db - .insert_block( + .insert_block::( &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), @@ -740,10 +715,12 @@ mod tests { ) .unwrap(), ); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(1)).unwrap()); + state_db.apply_pending(); + db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap()); + state_db.apply_pending(); db.commit( &state_db - .insert_block( + .insert_block::( &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), @@ -751,8 +728,11 @@ mod tests { ) .unwrap(), ); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(21)).unwrap()); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(3)).unwrap()); + state_db.apply_pending(); + db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap()); + state_db.apply_pending(); + 
db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap()); + state_db.apply_pending(); (db, state_db) } @@ -761,7 +741,7 @@ mod tests { fn full_archive_keeps_everything() { let (db, sdb) = make_test_db(PruningMode::ArchiveAll); assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::NotPruned); + assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); } #[test] @@ -770,43 +750,6 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); } - #[test] - fn block_record_unavailable() { - let (mut db, state_db) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(1), - max_mem: None, - })); - // import 2 blocks - for i in &[5, 6] { - db.commit( - &state_db - .insert_block( - &H256::from_low_u64_be(*i), - *i, - &H256::from_low_u64_be(*i - 1), - make_changeset(&[], &[]), - ) - .unwrap(), - ); - } - // canonicalize block 4 but not commit it to db - let c1 = state_db.canonicalize_block(&H256::from_low_u64_be(4)).unwrap(); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(3), 3), IsPruned::Pruned); - - // canonicalize block 5 but not commit it to db, block 4 is not pruned due to it is not - // commit to db yet (unavailable), return `MaybePruned` here because `apply_pending` is not - // called and block 3 is still in cache - let c2 = state_db.canonicalize_block(&H256::from_low_u64_be(5)).unwrap(); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::MaybePruned); - - // commit block 4 and 5 to db, and import a new block will prune both block 4 and 5 - db.commit(&c1); - db.commit(&c2); - db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(6)).unwrap()); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::Pruned); - assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(5), 5), IsPruned::Pruned); - } - #[test] fn prune_window_0() { let (db, _) = 
make_test_db(PruningMode::Constrained(Constraints { @@ -822,10 +765,10 @@ mod tests { max_blocks: Some(1), max_mem: None, })); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); + assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); } @@ -835,10 +778,10 @@ mod tests { max_blocks: Some(2), max_mem: None, })); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::NotPruned); - assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); + assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); + assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); } @@ -846,11 +789,11 @@ mod tests { fn detects_incompatible_mode() { let mut db = make_db(&[]); let (state_db_init, state_db) = - StateDb::open(db.clone(), Some(PruningMode::ArchiveAll), false, true).unwrap(); + StateDb::open(&mut db, Some(PruningMode::ArchiveAll), false, true).unwrap(); db.commit(&state_db_init); db.commit( &state_db - .insert_block( + .insert_block::( &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), @@ -859,8 +802,8 @@ mod tests { .unwrap(), ); let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); - let state_db_open_result: 
Result<(_, StateDb), _> = - StateDb::open(db.clone(), Some(new_mode), false, false); + let state_db_open_result: Result<(_, StateDb), _> = + StateDb::open(&mut db, Some(new_mode), false, false); assert!(state_db_open_result.is_err()); } @@ -871,13 +814,12 @@ mod tests { ) { let mut db = make_db(&[]); let (state_db_init, state_db) = - StateDb::::open(db.clone(), mode_when_created, false, true) - .unwrap(); + StateDb::::open(&mut db, mode_when_created, false, true).unwrap(); db.commit(&state_db_init); std::mem::drop(state_db); let state_db_reopen_result = - StateDb::::open(db.clone(), mode_when_reopened, false, false); + StateDb::::open(&mut db, mode_when_reopened, false, false); if let Ok(expected_mode) = expected_effective_mode_when_reopenned { let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); db.commit(&state_db_init); @@ -893,34 +835,34 @@ mod tests { #[test] fn pruning_mode_compatibility() { for (created, reopened, expected) in [ - (None, None, Ok(PruningMode::blocks_pruning(256))), - (None, Some(PruningMode::blocks_pruning(256)), Ok(PruningMode::blocks_pruning(256))), - (None, Some(PruningMode::blocks_pruning(128)), Ok(PruningMode::blocks_pruning(128))), - (None, Some(PruningMode::blocks_pruning(512)), Ok(PruningMode::blocks_pruning(512))), + (None, None, Ok(PruningMode::keep_blocks(256))), + (None, Some(PruningMode::keep_blocks(256)), Ok(PruningMode::keep_blocks(256))), + (None, Some(PruningMode::keep_blocks(128)), Ok(PruningMode::keep_blocks(128))), + (None, Some(PruningMode::keep_blocks(512)), Ok(PruningMode::keep_blocks(512))), (None, Some(PruningMode::ArchiveAll), Err(())), (None, Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::blocks_pruning(256)), None, Ok(PruningMode::blocks_pruning(256))), + (Some(PruningMode::keep_blocks(256)), None, Ok(PruningMode::keep_blocks(256))), ( - Some(PruningMode::blocks_pruning(256)), - Some(PruningMode::blocks_pruning(256)), - Ok(PruningMode::blocks_pruning(256)), + 
Some(PruningMode::keep_blocks(256)), + Some(PruningMode::keep_blocks(256)), + Ok(PruningMode::keep_blocks(256)), ), ( - Some(PruningMode::blocks_pruning(256)), - Some(PruningMode::blocks_pruning(128)), - Ok(PruningMode::blocks_pruning(128)), + Some(PruningMode::keep_blocks(256)), + Some(PruningMode::keep_blocks(128)), + Ok(PruningMode::keep_blocks(128)), ), ( - Some(PruningMode::blocks_pruning(256)), - Some(PruningMode::blocks_pruning(512)), - Ok(PruningMode::blocks_pruning(512)), + Some(PruningMode::keep_blocks(256)), + Some(PruningMode::keep_blocks(512)), + Ok(PruningMode::keep_blocks(512)), ), - (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveAll), Err(())), - (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveAll), Err(())), + (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveAll), None, Ok(PruningMode::ArchiveAll)), - (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(256)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(128)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(512)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(256)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(128)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(512)), Err(())), ( Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveAll), @@ -928,9 +870,9 @@ mod tests { ), (Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveCanonical), None, Ok(PruningMode::ArchiveCanonical)), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(256)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(128)), Err(())), - (Some(PruningMode::ArchiveCanonical), 
Some(PruningMode::blocks_pruning(512)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(256)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(128)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(512)), Err(())), (Some(PruningMode::ArchiveCanonical), Some(PruningMode::ArchiveAll), Err(())), ( Some(PruningMode::ArchiveCanonical), diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 3711cf7a42667..13cf5825b1b24 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -19,6 +19,8 @@ //! Canonicalization window. //! Maintains trees of block overlays and allows discarding trees/roots //! The overlays are added in `insert` and removed in `canonicalize`. +//! All pending changes are kept in memory until next call to `apply_pending` or +//! `revert_pending` use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError}; use codec::{Decode, Encode}; @@ -26,7 +28,7 @@ use log::trace; use std::collections::{hash_map::Entry, HashMap, VecDeque}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -pub(crate) const LAST_CANONICAL: &[u8] = b"last_canonical"; +const LAST_CANONICAL: &[u8] = b"last_canonical"; const MAX_BLOCKS_PER_LEVEL: u64 = 32; /// See module documentation. @@ -35,6 +37,8 @@ pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, levels: VecDeque>, parents: HashMap, + pending_canonicalizations: Vec, + pending_insertions: Vec, values: HashMap, // ref counted // would be deleted but kept around because block is pinned, ref counted. 
pinned: HashMap, @@ -225,6 +229,8 @@ impl NonCanonicalOverlay { last_canonicalized, levels, parents, + pending_canonicalizations: Default::default(), + pending_insertions: Default::default(), pinned: Default::default(), pinned_insertions: Default::default(), values, @@ -310,8 +316,9 @@ impl NonCanonicalOverlay { deleted: changeset.deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); - trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", number, index, hash, journal_record.inserted.len(), journal_record.deleted.len()); + trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); insert_values(&mut self.values, journal_record.inserted); + self.pending_insertions.push(hash.clone()); Ok(commit) } @@ -348,22 +355,38 @@ impl NonCanonicalOverlay { } pub fn last_canonicalized_block_number(&self) -> Option { - self.last_canonicalized.as_ref().map(|&(_, n)| n) + match self.last_canonicalized.as_ref().map(|&(_, n)| n) { + Some(n) => Some(n + self.pending_canonicalizations.len() as u64), + None if !self.pending_canonicalizations.is_empty() => + Some(self.pending_canonicalizations.len() as u64), + _ => None, + } + } + + pub fn last_canonicalized_hash(&self) -> Option { + self.last_canonicalized.as_ref().map(|&(ref h, _)| h.clone()) + } + + pub fn top_level(&self) -> Vec<(BlockHash, u64)> { + let start = self.last_canonicalized_block_number().unwrap_or(0); + self.levels + .get(self.pending_canonicalizations.len()) + .map(|level| level.blocks.iter().map(|r| (r.hash.clone(), start)).collect()) + .unwrap_or_default() } /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. 
- /// Add a set of changes of the canonicalized block to `CommitSet` - /// Return the block number of the canonicalized block + /// Returns a set of changes that need to be added to the DB. pub fn canonicalize( &mut self, hash: &BlockHash, commit: &mut CommitSet, - ) -> Result { + ) -> Result<(), StateDbError> { trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = match self.levels.pop_front() { - Some(level) => level, - None => return Err(StateDbError::InvalidBlock), - }; + let level = self + .levels + .get(self.pending_canonicalizations.len()) + .ok_or(StateDbError::InvalidBlock)?; let index = level .blocks .iter() @@ -372,63 +395,91 @@ impl NonCanonicalOverlay { let mut discarded_journals = Vec::new(); let mut discarded_blocks = Vec::new(); - for (i, overlay) in level.blocks.into_iter().enumerate() { - let mut pinned_children = 0; - // That's the one we need to canonicalize - if i == index { - commit.data.inserted.extend(overlay.inserted.iter().map(|k| { - ( - k.clone(), - self.values - .get(k) - .expect("For each key in overlays there's a value in values") - .1 - .clone(), - ) - })); - commit.data.deleted.extend(overlay.deleted.clone()); - } else { - // Discard this overlay + for (i, overlay) in level.blocks.iter().enumerate() { + if i != index { self.discard_journals( - 0, + self.pending_canonicalizations.len() + 1, &mut discarded_journals, &mut discarded_blocks, &overlay.hash, ); - pinned_children = discard_descendants( - &mut self.levels.as_mut_slices(), - &mut self.values, - &mut self.parents, - &self.pinned, - &mut self.pinned_insertions, - &overlay.hash, - ); - } - if self.pinned.contains_key(&overlay.hash) { - pinned_children += 1; - } - if pinned_children != 0 { - self.pinned_insertions - .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); - } else { - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); } discarded_journals.push(overlay.journal_key.clone()); 
discarded_blocks.push(overlay.hash.clone()); } - commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = (hash.clone(), self.front_block_number()); + // get the one we need to canonicalize + let overlay = &level.blocks[index]; + commit.data.inserted.extend(overlay.inserted.iter().map(|k| { + ( + k.clone(), + self.values + .get(k) + .expect("For each key in overlays there's a value in values") + .1 + .clone(), + ) + })); + commit.data.deleted.extend(overlay.deleted.clone()); + + commit.meta.deleted.append(&mut discarded_journals); + let canonicalized = + (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); commit .meta .inserted .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); + self.pending_canonicalizations.push(hash.clone()); + Ok(()) + } - let num = canonicalized.1; - self.last_canonicalized = Some(canonicalized); - Ok(num) + fn apply_canonicalizations(&mut self) { + let last = self.pending_canonicalizations.last().cloned(); + let count = self.pending_canonicalizations.len() as u64; + for hash in self.pending_canonicalizations.drain(..) 
{ + trace!(target: "state-db", "Post canonicalizing {:?}", hash); + let level = + self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); + let index = level + .blocks + .iter() + .position(|overlay| overlay.hash == hash) + .expect("Hash validity is checked in `canonicalize`"); + + // discard unfinalized overlays and values + for (i, overlay) in level.blocks.into_iter().enumerate() { + let mut pinned_children = if i != index { + discard_descendants( + &mut self.levels.as_mut_slices(), + &mut self.values, + &mut self.parents, + &self.pinned, + &mut self.pinned_insertions, + &overlay.hash, + ) + } else { + 0 + }; + if self.pinned.contains_key(&overlay.hash) { + pinned_children += 1; + } + if pinned_children != 0 { + self.pinned_insertions + .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); + } else { + self.parents.remove(&overlay.hash); + discard_values(&mut self.values, overlay.inserted); + } + } + } + if let Some(hash) = last { + let last_canonicalized = ( + hash, + self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1), + ); + self.last_canonicalized = Some(last_canonicalized); + } } /// Get a value from the node overlay. This searches in every existing changeset. @@ -442,7 +493,8 @@ impl NonCanonicalOverlay { /// Check if the block is in the canonicalization queue. pub fn have_block(&self, hash: &BlockHash) -> bool { - self.parents.contains_key(hash) + (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) && + !self.pending_canonicalizations.contains(hash) } /// Revert a single level. Returns commit set that deletes the journal or `None` if not @@ -490,8 +542,50 @@ impl NonCanonicalOverlay { } } + fn revert_insertions(&mut self) { + self.pending_insertions.reverse(); + for hash in self.pending_insertions.drain(..) { + self.parents.remove(&hash); + // find a level. When iterating insertions backwards the hash is always last in the + // level. 
+ let level_index = self + .levels + .iter() + .position(|level| { + level.blocks.last().expect("Hash is added in `insert` in reverse order").hash == + hash + }) + .expect("Hash is added in insert"); + + let overlay_index = self.levels[level_index].blocks.len() - 1; + let overlay = self.levels[level_index].remove(overlay_index); + discard_values(&mut self.values, overlay.inserted); + if self.levels[level_index].blocks.is_empty() { + debug_assert_eq!(level_index, self.levels.len() - 1); + self.levels.pop_back(); + } + } + } + + /// Apply all pending changes + pub fn apply_pending(&mut self) { + self.apply_canonicalizations(); + self.pending_insertions.clear(); + } + + /// Revert all pending changes + pub fn revert_pending(&mut self) { + self.pending_canonicalizations.clear(); + self.revert_insertions(); + } + /// Pin state values in memory pub fn pin(&mut self, hash: &BlockHash) { + if self.pending_insertions.contains(hash) { + // Pinning pending state is not implemented. Pending states + // won't be pruned for quite some time anyway, so it's not a big deal. 
+ return + } let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); @@ -661,7 +755,7 @@ mod tests { .unwrap(), ); db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - assert_eq!(db.meta_len(), 3); + assert_eq!(db.meta.len(), 3); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); assert_eq!(overlay.levels, overlay2.levels); @@ -684,6 +778,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); @@ -710,13 +805,18 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); - assert!(!contains(&overlay, 5)); - assert!(contains(&overlay, 7)); + assert!(contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 2); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); assert_eq!(overlay.parents.len(), 1); + assert!(!contains(&overlay, 5)); + assert!(contains(&overlay, 7)); let mut commit = CommitSet::default(); overlay.canonicalize(&h2, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 0); assert_eq!(overlay.parents.len(), 0); assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); @@ -735,11 +835,13 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); + assert!(contains(&overlay, 1)); + overlay.apply_pending(); assert!(!contains(&overlay, 1)); } #[test] - fn insert_and_canonicalize() { + fn insert_with_pending_canonicalization() { let h1 = H256::random(); let h2 = H256::random(); let h3 = H256::random(); @@ -748,11 +850,13 @@ mod tests { let changeset = make_changeset(&[], &[]); db.commit(&overlay.insert(&h1, 1, &H256::default(), 
changeset.clone()).unwrap()); db.commit(&overlay.insert(&h2, 2, &h1, changeset.clone()).unwrap()); + overlay.apply_pending(); let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); overlay.canonicalize(&h2, &mut commit).unwrap(); db.commit(&commit); db.commit(&overlay.insert(&h3, 3, &h2, changeset.clone()).unwrap()); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); } @@ -822,6 +926,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 2); assert_eq!(overlay.parents.len(), 6); assert!(!contains(&overlay, 1)); @@ -842,6 +947,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); assert_eq!(overlay.parents.len(), 3); assert!(!contains(&overlay, 11)); @@ -858,6 +964,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2_2, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 0); assert_eq!(overlay.parents.len(), 0); assert!(db.data_eq(&make_db(&[1, 12, 122]))); @@ -886,6 +993,31 @@ mod tests { assert!(overlay.revert_one().is_none()); } + #[test] + fn revert_pending_insertion() { + let h1 = H256::random(); + let h2_1 = H256::random(); + let h2_2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset3 = make_changeset(&[9], &[]); + overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap(); + assert!(contains(&overlay, 5)); + overlay.insert(&h2_1, 2, &h1, changeset2).unwrap(); + overlay.insert(&h2_2, 2, &h1, changeset3).unwrap(); + assert!(contains(&overlay, 7)); + assert!(contains(&overlay, 5)); + 
assert!(contains(&overlay, 9)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 3); + overlay.revert_pending(); + assert!(!contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + } + #[test] fn keeps_pinned() { let mut db = make_db(&[]); @@ -900,12 +1032,14 @@ mod tests { let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); + overlay.apply_pending(); overlay.pin(&h_1); let mut commit = CommitSet::default(); overlay.canonicalize(&h_2, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); assert!(contains(&overlay, 1)); overlay.unpin(&h_1); assert!(!contains(&overlay, 1)); @@ -929,12 +1063,14 @@ mod tests { db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); db.commit(&overlay.insert(&h_3, 1, &H256::default(), c_3).unwrap()); + overlay.apply_pending(); overlay.pin(&h_1); let mut commit = CommitSet::default(); overlay.canonicalize(&h_3, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); // 1_2 should be discarded, 1_1 is pinned assert!(contains(&overlay, 1)); overlay.unpin(&h_1); @@ -957,12 +1093,14 @@ mod tests { db.commit(&overlay.insert(&h_11, 1, &H256::default(), c_11).unwrap()); db.commit(&overlay.insert(&h_12, 1, &H256::default(), c_12).unwrap()); db.commit(&overlay.insert(&h_21, 2, &h_11, c_21).unwrap()); + overlay.apply_pending(); overlay.pin(&h_21); let mut commit = CommitSet::default(); overlay.canonicalize(&h_12, &mut commit).unwrap(); db.commit(&commit); + overlay.apply_pending(); // 1_1 and 2_1 should be both pinned assert!(contains(&overlay, 1)); overlay.unpin(&h_21); @@ -990,10 +1128,12 @@ mod tests { overlay.canonicalize(&root, &mut commit).unwrap(); overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay 
in the DB db.commit(&commit); + overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); assert!(contains(&overlay, 21)); assert!(!contains(&overlay, 11)); assert!(db.get_meta(&to_journal_key(12, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(12, 0)).unwrap().is_none()); // Restore into a new overlay and check that journaled value exists. let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); @@ -1002,6 +1142,7 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h21, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); + overlay.apply_pending(); assert!(!contains(&overlay, 21)); } @@ -1025,6 +1166,7 @@ mod tests { overlay.canonicalize(&root, &mut commit).unwrap(); overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); + overlay.apply_pending(); // add another block at top level. It should reuse journal index 0 of previously discarded // block diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 458522b8119fd..0fdcb8e822b6f 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -24,65 +24,74 @@ //! the death list. //! The changes are journaled in the DB. -use crate::{ - noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError, - DEFAULT_MAX_BLOCK_CONSTRAINT, -}; +use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb}; use codec::{Decode, Encode}; -use log::trace; +use log::{trace, warn}; use std::collections::{HashMap, HashSet, VecDeque}; -pub(crate) const LAST_PRUNED: &[u8] = b"last_pruned"; +const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] -pub struct RefWindow { - /// A queue of blocks keep tracking keys that should be deleted for each block in the - /// pruning window. - queue: DeathRowQueue, - /// Block number that is next to be pruned. 
- base: u64, +pub struct RefWindow { + /// A queue of keys that should be deleted for each block in the pruning window. + death_rows: VecDeque>, + /// An index that maps each key from `death_rows` to block number. + death_index: HashMap, + /// Block number that corresponds to the front of `death_rows`. + pending_number: u64, + /// Number of call of `note_canonical` after + /// last call `apply_pending` or `revert_pending` + pending_canonicalizations: usize, + /// Number of calls of `prune_one` after + /// last call `apply_pending` or `revert_pending` + pending_prunings: usize, + /// Keep track of re-inserted keys and do not delete them when pruning. + /// Setting this to false requires backend that supports reference + /// counting. + count_insertions: bool, } -/// `DeathRowQueue` used to keep track of blocks in the pruning window, there are two flavors: -/// - `Mem`, used when the backend database do not supports reference counting, keep all -/// blocks in memory, and keep track of re-inserted keys to not delete them when pruning -/// - `DbBacked`, used when the backend database supports reference counting, only keep -/// a few number of blocks in memory and load more blocks on demand -#[derive(parity_util_mem_derive::MallocSizeOf)] -enum DeathRowQueue { - Mem { - /// A queue of keys that should be deleted for each block in the pruning window. - death_rows: VecDeque>, - /// An index that maps each key from `death_rows` to block number. - death_index: HashMap, - }, - DbBacked { - // The backend database - #[ignore_malloc_size_of = "Shared data"] - db: D, - /// A queue of keys that should be deleted for each block in the pruning window. 
- /// Only caching the first few blocks of the pruning window, blocks inside are - /// successive and ordered by block number - cache: VecDeque>, - /// A soft limit of the cache's size - cache_capacity: usize, - /// Last block number added to the window - last: Option, - }, +#[derive(Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] +struct DeathRow { + hash: BlockHash, + journal_key: Vec, + deleted: HashSet, +} + +#[derive(Encode, Decode)] +struct JournalRecord { + hash: BlockHash, + inserted: Vec, + deleted: Vec, } -impl DeathRowQueue { - /// Return a `DeathRowQueue` that all blocks are keep in memory - fn new_mem(db: &D, base: u64) -> Result, Error> { - let mut block = base; - let mut queue = DeathRowQueue::::Mem { - death_rows: VecDeque::new(), - death_index: HashMap::new(), +fn to_journal_key(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL, &block) +} + +impl RefWindow { + pub fn new( + db: &D, + count_insertions: bool, + ) -> Result, Error> { + let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)?; + let pending_number: u64 = match last_pruned { + Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, + None => 0, + }; + let mut block = pending_number; + let mut pruning = RefWindow { + death_rows: Default::default(), + death_index: Default::default(), + pending_number, + pending_canonicalizations: 0, + pending_prunings: 0, + count_insertions, }; // read the journal - trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base); + trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); loop { let journal_key = to_journal_key(block); match db.get_meta(&journal_key).map_err(Error::Db)? 
{ @@ -90,764 +99,343 @@ impl DeathRowQueue { let record: JournalRecord = Decode::decode(&mut record.as_slice())?; trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - queue.import(base, block, record); + pruning.import( + &record.hash, + journal_key, + record.inserted.into_iter(), + record.deleted, + ); }, None => break, } block += 1; } - Ok(queue) + Ok(pruning) } - /// Return a `DeathRowQueue` that backed by an database, and only keep a few number - /// of blocks in memory - fn new_db_backed( - db: D, - base: u64, - last: Option, - window_size: u32, - ) -> Result, Error> { - // limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT` - let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; - let mut cache = VecDeque::with_capacity(cache_capacity); - trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base); - DeathRowQueue::load_batch_from_db(&db, &mut cache, base, cache_capacity)?; - Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, last }) - } - - /// import a new block to the back of the queue - fn import(&mut self, base: u64, num: u64, journal_record: JournalRecord) { - let JournalRecord { hash, inserted, deleted } = journal_record; - trace!(target: "state-db", "Importing {}, base={}", num, base); - match self { - DeathRowQueue::DbBacked { cache, cache_capacity, last, .. } => { - // If the new block continues cached range and there is space, load it directly into - // cache. 
- if num == base + cache.len() as u64 && cache.len() < *cache_capacity { - trace!(target: "state-db", "Adding to DB backed cache {:?} (#{})", hash, num); - cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); - } - *last = Some(num); - }, - DeathRowQueue::Mem { death_rows, death_index } => { - // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = death_index.remove(&k) { - death_rows[(block - base) as usize].deleted.remove(&k); - } - } - // add new keys - let imported_block = base + death_rows.len() as u64; - for k in deleted.iter() { - death_index.insert(k.clone(), imported_block); - } - death_rows.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); - }, - } - } - - /// Pop out one block from the front of the queue, `base` is the block number - /// of the first block of the queue - fn pop_front( + fn import>( &mut self, - base: u64, - ) -> Result>, Error> { - match self { - DeathRowQueue::DbBacked { db, cache, cache_capacity, .. 
} => { - if cache.is_empty() { - DeathRowQueue::load_batch_from_db(db, cache, base, *cache_capacity)?; + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + ) { + if self.count_insertions { + // remove all re-inserted keys from death rows + for k in inserted { + if let Some(block) = self.death_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); } - Ok(cache.pop_front()) - }, - DeathRowQueue::Mem { death_rows, death_index } => match death_rows.pop_front() { - Some(row) => { - for k in row.deleted.iter() { - death_index.remove(k); - } - Ok(Some(row)) - }, - None => Ok(None), - }, - } - } - - /// Load a batch of blocks from the backend database into `cache`, starting from `base` and up - /// to `base + cache_capacity` - fn load_batch_from_db( - db: &D, - cache: &mut VecDeque>, - base: u64, - cache_capacity: usize, - ) -> Result<(), Error> { - let start = base + cache.len() as u64; - let batch_size = cache_capacity; - for i in 0..batch_size as u64 { - match load_death_row_from_db::(db, start + i)? { - Some(row) => { - cache.push_back(row); - }, - None => break, } - } - Ok(()) - } - /// Check if the block at the given `index` of the queue exist - /// it is the caller's responsibility to ensure `index` won't be out of bounds - fn have_block(&self, hash: &BlockHash, index: usize) -> HaveBlock { - match self { - DeathRowQueue::DbBacked { cache, .. } => { - if cache.len() > index { - (cache[index].hash == *hash).into() - } else { - // The block is not in the cache but it still may exist on disk. - HaveBlock::Maybe - } - }, - DeathRowQueue::Mem { death_rows, .. } => (death_rows[index].hash == *hash).into(), - } - } - - /// Return the number of block in the pruning window - fn len(&self, base: u64) -> u64 { - match self { - DeathRowQueue::DbBacked { last, .. } => last.map_or(0, |l| l + 1 - base), - DeathRowQueue::Mem { death_rows, .. 
} => death_rows.len() as u64, - } - } - - #[cfg(test)] - fn get_mem_queue_state( - &self, - ) -> Option<(&VecDeque>, &HashMap)> { - match self { - DeathRowQueue::DbBacked { .. } => None, - DeathRowQueue::Mem { death_rows, death_index } => Some((death_rows, death_index)), - } - } - - #[cfg(test)] - fn get_db_backed_queue_state( - &self, - ) -> Option<(&VecDeque>, Option)> { - match self { - DeathRowQueue::DbBacked { cache, last, .. } => Some((cache, *last)), - DeathRowQueue::Mem { .. } => None, - } - } -} - -fn load_death_row_from_db( - db: &D, - block: u64, -) -> Result>, Error> { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(Error::Db)? { - Some(record) => { - let JournalRecord { hash, deleted, .. } = Decode::decode(&mut record.as_slice())?; - Ok(Some(DeathRow { hash, deleted: deleted.into_iter().collect() })) - }, - None => Ok(None), - } -} - -#[derive(Clone, Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] -struct DeathRow { - hash: BlockHash, - deleted: HashSet, -} - -#[derive(Encode, Decode, Default)] -struct JournalRecord { - hash: BlockHash, - inserted: Vec, - deleted: Vec, -} - -fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) -} - -/// The result return by `RefWindow::have_block` -#[derive(Debug, PartialEq, Eq)] -pub enum HaveBlock { - /// Definitely don't have this block. 
- No, - /// May or may not have this block, need further checking - Maybe, - /// Definitely has this block - Yes, -} - -impl From for HaveBlock { - fn from(have: bool) -> Self { - if have { - HaveBlock::Yes - } else { - HaveBlock::No + // add new keys + let imported_block = self.pending_number + self.death_rows.len() as u64; + for k in deleted.iter() { + self.death_index.insert(k.clone(), imported_block); + } } - } -} - -impl RefWindow { - pub fn new( - db: D, - window_size: u32, - count_insertions: bool, - ) -> Result, Error> { - // the block number of the first block in the queue or the next block number if the queue is - // empty - let base = match db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)? { - Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, - None => 0, - }; - // the block number of the last block in the queue - let last_canonicalized_number = - match db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(Error::Db)? { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?.1), - None => None, - }; - - let queue = if count_insertions { - DeathRowQueue::new_mem(&db, base)? - } else { - let last = match last_canonicalized_number { - Some(last_canonicalized_number) => { - debug_assert!(last_canonicalized_number + 1 >= base); - Some(last_canonicalized_number) - }, - // None means `LAST_CANONICAL` is never been wrote, since the pruning journals are - // in the same `CommitSet` as `LAST_CANONICAL`, it means no pruning journal have - // ever been committed to the db, thus set `unload` to zero - None => None, - }; - DeathRowQueue::new_db_backed(db, base, last, window_size)? 
- }; - - Ok(RefWindow { queue, base }) + self.death_rows.push_back(DeathRow { + hash: hash.clone(), + deleted: deleted.into_iter().collect(), + journal_key, + }); } pub fn window_size(&self) -> u64 { - self.queue.len(self.base) as u64 + (self.death_rows.len() - self.pending_prunings) as u64 } - /// Get the hash of the next pruning block - pub fn next_hash(&mut self) -> Result, Error> { - let res = match &mut self.queue { - DeathRowQueue::DbBacked { db, cache, cache_capacity, .. } => { - if cache.is_empty() { - DeathRowQueue::load_batch_from_db(db, cache, self.base, *cache_capacity)?; - } - cache.front().map(|r| r.hash.clone()) - }, - DeathRowQueue::Mem { death_rows, .. } => death_rows.front().map(|r| r.hash.clone()), - }; - Ok(res) + pub fn next_hash(&self) -> Option { + self.death_rows.get(self.pending_prunings).map(|r| r.hash.clone()) } pub fn mem_used(&self) -> usize { 0 } - fn is_empty(&self) -> bool { - self.window_size() == 0 + pub fn pending(&self) -> u64 { + self.pending_number + self.pending_prunings as u64 } - // Check if a block is in the pruning window and not be pruned yet - pub fn have_block(&self, hash: &BlockHash, number: u64) -> HaveBlock { - // if the queue is empty or the block number exceed the pruning window, we definitely - // do not have this block - if self.is_empty() || number < self.base || number >= self.base + self.window_size() { - return HaveBlock::No - } - self.queue.have_block(hash, (number - self.base) as usize) + pub fn have_block(&self, hash: &BlockHash) -> bool { + self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash) } /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. - pub fn prune_one(&mut self, commit: &mut CommitSet) -> Result<(), Error> { - if let Some(pruned) = self.queue.pop_front(self.base)? 
{ + pub fn prune_one(&mut self, commit: &mut CommitSet) { + if let Some(pruned) = self.death_rows.get(self.pending_prunings) { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - let index = self.base; - commit.data.deleted.extend(pruned.deleted.into_iter()); + let index = self.pending_number + self.pending_prunings as u64; + commit.data.deleted.extend(pruned.deleted.iter().cloned()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); - commit.meta.deleted.push(to_journal_key(self.base)); - self.base += 1; - Ok(()) + commit.meta.deleted.push(pruned.journal_key.clone()); + self.pending_prunings += 1; } else { - trace!(target: "state-db", "Trying to prune when there's nothing to prune"); - Err(Error::StateDb(StateDbError::BlockUnavailable)) + warn!(target: "state-db", "Trying to prune when there's nothing to prune"); } } /// Add a change set to the window. Creates a journal record and pushes it to `commit` - pub fn note_canonical( - &mut self, - hash: &BlockHash, - number: u64, - commit: &mut CommitSet, - ) -> Result<(), Error> { - if self.base == 0 && self.is_empty() && number > 0 { - // assume that parent was canonicalized - self.base = number; - } else if (self.base + self.window_size()) != number { - return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) - } + pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. 
}) { + let inserted = if self.count_insertions { commit.data.inserted.iter().map(|(k, _)| k.clone()).collect() } else { Default::default() }; - let deleted = std::mem::take(&mut commit.data.deleted); + let deleted = ::std::mem::take(&mut commit.data.deleted); let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted }; - commit.meta.inserted.push((to_journal_key(number), journal_record.encode())); - self.queue.import(self.base, number, journal_record); - Ok(()) + let block = self.pending_number + self.death_rows.len() as u64; + let journal_key = to_journal_key(block); + commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); + self.import( + &journal_record.hash, + journal_key, + journal_record.inserted.into_iter(), + journal_record.deleted, + ); + self.pending_canonicalizations += 1; + } + + /// Apply all pending changes + pub fn apply_pending(&mut self) { + self.pending_canonicalizations = 0; + for _ in 0..self.pending_prunings { + let pruned = self + .death_rows + .pop_front() + .expect("pending_prunings is always < death_rows.len()"); + trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); + if self.count_insertions { + for k in pruned.deleted.iter() { + self.death_index.remove(k); + } + } + self.pending_number += 1; + } + self.pending_prunings = 0; + } + + /// Revert all pending changes + pub fn revert_pending(&mut self) { + // Revert pending deletions. + // Note that pending insertions might cause some existing deletions to be removed from + // `death_index` We don't bother to track and revert that for now. This means that a few + // nodes might end up no being deleted in case transaction fails and `revert_pending` is + // called. 
+ self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); + if self.count_insertions { + let new_max_block = self.death_rows.len() as u64 + self.pending_number; + self.death_index.retain(|_, block| *block < new_max_block); + } + self.pending_canonicalizations = 0; + self.pending_prunings = 0; } } #[cfg(test)] mod tests { - use super::{to_journal_key, DeathRowQueue, HaveBlock, JournalRecord, RefWindow, LAST_PRUNED}; + use super::RefWindow; use crate::{ - noncanonical::LAST_CANONICAL, test::{make_commit, make_db, TestDb}, - to_meta_key, CommitSet, Error, Hash, StateDbError, DEFAULT_MAX_BLOCK_CONSTRAINT, + CommitSet, }; - use codec::Encode; use sp_core::H256; - fn check_journal(pruning: &RefWindow, db: &TestDb) { - let count_insertions = matches!(pruning.queue, DeathRowQueue::Mem { .. }); - let restored: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); - assert_eq!(pruning.base, restored.base); - assert_eq!(pruning.queue.get_mem_queue_state(), restored.queue.get_mem_queue_state()); + fn check_journal(pruning: &RefWindow, db: &TestDb) { + let restored: RefWindow = RefWindow::new(db, pruning.count_insertions).unwrap(); + assert_eq!(pruning.pending_number, restored.pending_number); + assert_eq!(pruning.death_rows, restored.death_rows); + assert_eq!(pruning.death_index, restored.death_index); } #[test] fn created_from_empty_db() { let db = make_db(&[]); - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); - assert_eq!(pruning.base, 0); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert!(death_rows.is_empty()); - assert!(death_index.is_empty()); + let pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + assert_eq!(pruning.pending_number, 0); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); } #[test] fn prune_empty() { let db = make_db(&[]); - let mut pruning: RefWindow = - 
RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); let mut commit = CommitSet::default(); - assert_eq!( - Err(Error::StateDb(StateDbError::BlockUnavailable)), - pruning.prune_one(&mut commit) - ); - assert_eq!(pruning.base, 0); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert!(death_rows.is_empty()); - assert!(death_index.is_empty()); + pruning.prune_one(&mut commit); + assert_eq!(pruning.pending_number, 0); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + assert!(pruning.pending_prunings == 0); + assert!(pruning.pending_canonicalizations == 0); } #[test] fn prune_one() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); let mut commit = make_commit(&[4, 5], &[1, 3]); - let hash = H256::random(); - pruning.note_canonical(&hash, 0, &mut commit).unwrap(); + let h = H256::random(); + pruning.note_canonical(&h, &mut commit); db.commit(&commit); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes); + assert!(pruning.have_block(&h)); + pruning.apply_pending(); + assert!(pruning.have_block(&h)); assert!(commit.data.deleted.is_empty()); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert_eq!(death_rows.len(), 1); - assert_eq!(death_index.len(), 2); + assert_eq!(pruning.death_rows.len(), 1); + assert_eq!(pruning.death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No); + pruning.prune_one(&mut commit); + assert!(!pruning.have_block(&h)); db.commit(&commit); - 
assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No); + pruning.apply_pending(); + assert!(!pruning.have_block(&h)); assert!(db.data_eq(&make_db(&[2, 4, 5]))); - let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); - assert!(death_rows.is_empty()); - assert!(death_index.is_empty()); - assert_eq!(pruning.base, 1); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + assert_eq!(pruning.pending_number, 1); } #[test] fn prune_two() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); + pruning.apply_pending(); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); + pruning.apply_pending(); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); + pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.base, 2); + assert_eq!(pruning.pending_number, 2); } #[test] fn prune_two_pending() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), 0, 
&mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); + pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.base, 2); + assert_eq!(pruning.pending_number, 2); } #[test] fn reinserted_survives() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.apply_pending(); check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); 
db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - assert_eq!(pruning.base, 3); + pruning.apply_pending(); + assert_eq!(pruning.pending_number, 3); } #[test] fn reinserted_survive_pending() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - assert_eq!(pruning.base, 3); + pruning.apply_pending(); + assert_eq!(pruning.pending_number, 3); } #[test] fn reinserted_ignores() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let mut pruning: RefWindow = RefWindow::new(&db, false).unwrap(); let mut 
commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); + pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.apply_pending(); check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + pruning.prune_one(&mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - } - - fn push_last_canonicalized(block: u64, commit: &mut CommitSet) { - commit - .meta - .inserted - .push((to_meta_key(LAST_CANONICAL, &()), (block, block).encode())); - } - - fn push_last_pruned(block: u64, commit: &mut CommitSet) { - commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), block.encode())); - } - - #[test] - fn init_db_backed_queue() { - let mut db = make_db(&[]); - let mut commit = CommitSet::default(); - - fn load_pruning_from_db(db: TestDb) -> (usize, u64) { - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - (cache.len(), pruning.base) - } - - fn push_record(block: u64, commit: &mut CommitSet) { - commit - .meta - .inserted - .push((to_journal_key(block), JournalRecord::::default().encode())); - } - - // empty database - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 0); - assert_eq!(base, 0); - - // canonicalized the genesis block but no pruning - push_last_canonicalized(0, &mut commit); - push_record(0, &mut commit); - db.commit(&commit); - let (loaded_blocks, base) = 
load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 1); - assert_eq!(base, 0); - - // pruned the genesis block - push_last_pruned(0, &mut commit); - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 0); - assert_eq!(base, 1); - - // canonicalize more blocks - push_last_canonicalized(10, &mut commit); - for i in 1..=10 { - push_record(i, &mut commit); - } - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 10); - assert_eq!(base, 1); - - // pruned all blocks - push_last_pruned(10, &mut commit); - db.commit(&commit); - let (loaded_blocks, base) = load_pruning_from_db(db.clone()); - assert_eq!(loaded_blocks, 0); - assert_eq!(base, 11); - } - - #[test] - fn db_backed_queue() { - let mut db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; - - // start as an empty queue - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), 0); - assert_eq!(last, None); - - // import blocks - // queue size and content should match - for i in 0..(cache_capacity + 10) { - let mut commit = make_commit(&[], &[]); - pruning.note_canonical(&(i as u64), i as u64, &mut commit).unwrap(); - push_last_canonicalized(i as u64, &mut commit); - db.commit(&commit); - // blocks will fill the cache first - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - if i < cache_capacity { - assert_eq!(cache.len(), i + 1); - } else { - assert_eq!(cache.len(), cache_capacity); - } - assert_eq!(last, Some(i as u64)); - } - assert_eq!(pruning.window_size(), cache_capacity as u64 + 10); - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - assert_eq!(last, Some(cache_capacity as u64 + 10 - 1)); - for i in 0..cache_capacity 
{ - assert_eq!(cache[i].hash, i as u64); - } - - // import a new block to the end of the queue - // won't keep the new block in memory - let mut commit = CommitSet::default(); - pruning - .note_canonical(&(cache_capacity as u64 + 10), cache_capacity as u64 + 10, &mut commit) - .unwrap(); - assert_eq!(pruning.window_size(), cache_capacity as u64 + 11); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - - // revert the last add that no apply yet - // NOTE: do not commit the previous `CommitSet` to db - pruning = RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; - assert_eq!(pruning.window_size(), cache_capacity as u64 + 10); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - - // remove one block from the start of the queue - // block is removed from the head of cache - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - assert_eq!(pruning.window_size(), cache_capacity as u64 + 9); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity - 1); - for i in 0..(cache_capacity - 1) { - assert_eq!(cache[i].hash, (i + 1) as u64); - } - - // load a new queue from db - // `cache` is full again but the content of the queue should be the same - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - assert_eq!(pruning.window_size(), cache_capacity as u64 + 9); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - for i in 0..cache_capacity { - assert_eq!(cache[i].hash, (i + 1) as u64); - } - } - - #[test] - fn load_block_from_db() { - let mut db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, 
false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; - - // import blocks - for i in 0..(cache_capacity as u64 * 2 + 10) { - let mut commit = make_commit(&[], &[]); - pruning.note_canonical(&i, i, &mut commit).unwrap(); - push_last_canonicalized(i as u64, &mut commit); - db.commit(&commit); - } - - // the following operations won't trigger loading block from db: - // - getting block in cache - // - getting block not in the queue - assert_eq!(pruning.next_hash().unwrap().unwrap(), 0); - let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity); - assert_eq!(last, Some(cache_capacity as u64 * 2 + 10 - 1)); - - // clear all block loaded in cache - for _ in 0..cache_capacity * 2 { - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); - } - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert!(cache.is_empty()); - - // getting the hash of block that not in cache will also trigger loading - // the remaining blocks from db - assert_eq!(pruning.next_hash().unwrap().unwrap(), (cache_capacity * 2) as u64); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), 10); - - // load a new queue from db - // `cache` should be the same - let pruning: RefWindow = - RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - assert_eq!(pruning.window_size(), 10); - let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), 10); - for i in 0..10 { - assert_eq!(cache[i].hash, (cache_capacity * 2 + i) as u64); - } - } - - #[test] - fn get_block_from_queue() { - let mut db = make_db(&[]); - let mut pruning: RefWindow = - RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as u64; - - // import blocks and commit to db - let mut commit = make_commit(&[], &[]); - for i in 
0..(cache_capacity + 10) { - pruning.note_canonical(&i, i, &mut commit).unwrap(); - } - db.commit(&commit); - - // import a block but not commit to db yet - let mut pending_commit = make_commit(&[], &[]); - let index = cache_capacity + 10; - pruning.note_canonical(&index, index, &mut pending_commit).unwrap(); - - let mut commit = make_commit(&[], &[]); - // prune blocks that had committed to db - for i in 0..(cache_capacity + 10) { - assert_eq!(pruning.next_hash().unwrap(), Some(i)); - pruning.prune_one(&mut commit).unwrap(); - } - // return `None` for block that did not commit to db - assert_eq!(pruning.next_hash().unwrap(), None); - assert_eq!( - pruning.prune_one(&mut commit).unwrap_err(), - Error::StateDb(StateDbError::BlockUnavailable) - ); - // commit block to db and no error return - db.commit(&pending_commit); - assert_eq!(pruning.next_hash().unwrap(), Some(index)); - pruning.prune_one(&mut commit).unwrap(); - db.commit(&commit); + assert!(pruning.death_index.is_empty()); } } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 314ec2902452a..9fb97036b2f24 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -20,16 +20,10 @@ use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; use sp_core::H256; -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -#[derive(Default, Debug, Clone)] -pub struct TestDb(Arc>); +use std::collections::HashMap; #[derive(Default, Debug, Clone, PartialEq, Eq)] -struct TestDbInner { +pub struct TestDb { pub data: HashMap, pub meta: HashMap, DBValue>, } @@ -38,7 +32,7 @@ impl MetaDb for TestDb { type Error = (); fn get_meta(&self, key: &[u8]) -> Result, ()> { - Ok(self.0.read().unwrap().meta.get(key).cloned()) + Ok(self.meta.get(key).cloned()) } } @@ -47,29 +41,25 @@ impl NodeDb for TestDb { type Key = H256; fn get(&self, key: &H256) -> Result, ()> { - Ok(self.0.read().unwrap().data.get(key).cloned()) + Ok(self.data.get(key).cloned()) } } impl TestDb { pub fn 
commit(&mut self, commit: &CommitSet) { - self.0.write().unwrap().data.extend(commit.data.inserted.iter().cloned()); - self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned()); + self.data.extend(commit.data.inserted.iter().cloned()); + self.meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.data.deleted.iter() { - self.0.write().unwrap().data.remove(k); + self.data.remove(k); } - self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned()); + self.meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.meta.deleted.iter() { - self.0.write().unwrap().meta.remove(k); + self.meta.remove(k); } } pub fn data_eq(&self, other: &TestDb) -> bool { - self.0.read().unwrap().data == other.0.read().unwrap().data - } - - pub fn meta_len(&self) -> usize { - self.0.read().unwrap().meta.len() + self.data == other.data } } @@ -88,11 +78,11 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { } pub fn make_db(inserted: &[u64]) -> TestDb { - TestDb(Arc::new(RwLock::new(TestDbInner { + TestDb { data: inserted .iter() .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) .collect(), meta: Default::default(), - }))) + } } diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 12ffc0c2e8d7a..d02637fcf884b 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.79" thiserror = "1.0.30" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/sysinfo/Cargo.toml b/client/sysinfo/Cargo.toml index 882cbd96c1c5f..0973631a3cc24 100644 --- 
a/client/sysinfo/Cargo.toml +++ b/client/sysinfo/Cargo.toml @@ -21,11 +21,8 @@ rand = "0.7.3" rand_pcg = "0.2.1" regex = "1" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.79" sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } sp-std = { version = "4.0.0", path = "../../primitives/std" } - -[dev-dependencies] -sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/sysinfo/src/lib.rs b/client/sysinfo/src/lib.rs index cef5a4d210df1..be63fefe9ecd1 100644 --- a/client/sysinfo/src/lib.rs +++ b/client/sysinfo/src/lib.rs @@ -29,7 +29,6 @@ mod sysinfo_linux; pub use sysinfo::{ benchmark_cpu, benchmark_disk_random_writes, benchmark_disk_sequential_writes, benchmark_memory, benchmark_sr25519_verify, gather_hwbench, gather_sysinfo, - serialize_throughput, serialize_throughput_option, Throughput, }; /// The operating system part of the current target triplet. @@ -45,23 +44,13 @@ pub const TARGET_ENV: &str = include_str!(concat!(env!("OUT_DIR"), "/target_env. #[derive(Clone, Debug, serde::Serialize)] pub struct HwBench { /// The CPU speed, as measured in how many MB/s it can hash using the BLAKE2b-256 hash. - #[serde(serialize_with = "serialize_throughput")] - pub cpu_hashrate_score: Throughput, + pub cpu_hashrate_score: u64, /// Memory bandwidth in MB/s, calculated by measuring the throughput of `memcpy`. - #[serde(serialize_with = "serialize_throughput")] - pub memory_memcpy_score: Throughput, + pub memory_memcpy_score: u64, /// Sequential disk write speed in MB/s. - #[serde( - serialize_with = "serialize_throughput_option", - skip_serializing_if = "Option::is_none" - )] - pub disk_sequential_write_score: Option, + pub disk_sequential_write_score: Option, /// Random disk write speed in MB/s. 
- #[serde( - serialize_with = "serialize_throughput_option", - skip_serializing_if = "Option::is_none" - )] - pub disk_random_write_score: Option, + pub disk_random_write_score: Option, } /// Limit the execution time of a benchmark. @@ -131,14 +120,14 @@ pub fn print_sysinfo(sysinfo: &sc_telemetry::SysInfo) { /// Prints out the results of the hardware benchmarks in the logs. pub fn print_hwbench(hwbench: &HwBench) { - log::info!("🏁 CPU score: {}", hwbench.cpu_hashrate_score); - log::info!("🏁 Memory score: {}", hwbench.memory_memcpy_score); + log::info!("🏁 CPU score: {}MB/s", hwbench.cpu_hashrate_score); + log::info!("🏁 Memory score: {}MB/s", hwbench.memory_memcpy_score); if let Some(score) = hwbench.disk_sequential_write_score { - log::info!("🏁 Disk score (seq. writes): {}", score); + log::info!("🏁 Disk score (seq. writes): {}MB/s", score); } if let Some(score) = hwbench.disk_random_write_score { - log::info!("🏁 Disk score (rand. writes): {}", score); + log::info!("🏁 Disk score (rand. writes): {}MB/s", score); } } diff --git a/client/sysinfo/src/sysinfo.rs b/client/sysinfo/src/sysinfo.rs index c66a6f6a62aed..fc347c1cc2eb3 100644 --- a/client/sysinfo/src/sysinfo.rs +++ b/client/sysinfo/src/sysinfo.rs @@ -21,10 +21,9 @@ use crate::{ExecutionLimit, HwBench}; use sc_telemetry::SysInfo; use sp_core::{sr25519, Pair}; use sp_io::crypto::sr25519_verify; -use sp_std::{fmt, prelude::*}; +use sp_std::prelude::*; use rand::{seq::SliceRandom, Rng, RngCore}; -use serde::Serializer; use std::{ fs::File, io::{Seek, SeekFrom, Write}, @@ -33,110 +32,6 @@ use std::{ time::{Duration, Instant}, }; -/// The unit in which the [`Throughput`] (bytes per second) is denoted. -pub enum Unit { - GiBs, - MiBs, - KiBs, -} - -impl fmt::Display for Unit { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(match self { - Unit::GiBs => "GiBs", - Unit::MiBs => "MiBs", - Unit::KiBs => "KiBs", - }) - } -} - -/// Throughput as measured in bytes per second. 
-#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] -pub struct Throughput(f64); - -const KIBIBYTE: f64 = (1 << 10) as f64; -const MEBIBYTE: f64 = (1 << 20) as f64; -const GIBIBYTE: f64 = (1 << 30) as f64; - -impl Throughput { - /// Construct [`Self`] from kibibyte/s. - pub fn from_kibs(kibs: f64) -> Throughput { - Throughput(kibs * KIBIBYTE) - } - - /// Construct [`Self`] from mebibyte/s. - pub fn from_mibs(mibs: f64) -> Throughput { - Throughput(mibs * MEBIBYTE) - } - - /// Construct [`Self`] from gibibyte/s. - pub fn from_gibs(gibs: f64) -> Throughput { - Throughput(gibs * GIBIBYTE) - } - - /// [`Self`] as number of byte/s. - pub fn as_bytes(&self) -> f64 { - self.0 - } - - /// [`Self`] as number of kibibyte/s. - pub fn as_kibs(&self) -> f64 { - self.0 / KIBIBYTE - } - - /// [`Self`] as number of mebibyte/s. - pub fn as_mibs(&self) -> f64 { - self.0 / MEBIBYTE - } - - /// [`Self`] as number of gibibyte/s. - pub fn as_gibs(&self) -> f64 { - self.0 / GIBIBYTE - } - - /// Normalizes [`Self`] to use the largest unit possible. - pub fn normalize(&self) -> (f64, Unit) { - let bs = self.0; - - if bs >= GIBIBYTE { - (self.as_gibs(), Unit::GiBs) - } else if bs >= MEBIBYTE { - (self.as_mibs(), Unit::MiBs) - } else { - (self.as_kibs(), Unit::KiBs) - } - } -} - -impl fmt::Display for Throughput { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let (value, unit) = self.normalize(); - write!(f, "{:.2?} {}", value, unit) - } -} - -/// Serializes `Throughput` and uses MiBs as the unit. -pub fn serialize_throughput(throughput: &Throughput, serializer: S) -> Result -where - S: Serializer, -{ - serializer.serialize_u64(throughput.as_mibs() as u64) -} - -/// Serializes `Option` and uses MiBs as the unit. 
-pub fn serialize_throughput_option( - maybe_throughput: &Option, - serializer: S, -) -> Result -where - S: Serializer, -{ - if let Some(throughput) = maybe_throughput { - return serializer.serialize_some(&(throughput.as_mibs() as u64)) - } - serializer.serialize_none() -} - #[inline(always)] pub(crate) fn benchmark( name: &str, @@ -144,7 +39,7 @@ pub(crate) fn benchmark( max_iterations: usize, max_duration: Duration, mut run: impl FnMut() -> Result<(), E>, -) -> Result { +) -> Result { // Run the benchmark once as a warmup to get the code into the L1 cache. run()?; @@ -163,9 +58,9 @@ pub(crate) fn benchmark( } } - let score = Throughput::from_kibs((size * count) as f64 / (elapsed.as_secs_f64() * 1024.0)); + let score = ((size * count) as f64 / elapsed.as_secs_f64()) / (1024.0 * 1024.0); log::trace!( - "Calculated {} of {} in {} iterations in {}ms", + "Calculated {} of {:.2}MB/s in {} iterations in {}ms", name, score, count, @@ -225,14 +120,14 @@ fn clobber_value(input: &mut T) { pub const DEFAULT_CPU_EXECUTION_LIMIT: ExecutionLimit = ExecutionLimit::Both { max_iterations: 4 * 1024, max_duration: Duration::from_millis(100) }; -// This benchmarks the CPU speed as measured by calculating BLAKE2b-256 hashes, in bytes per second. -pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput { +// This benchmarks the CPU speed as measured by calculating BLAKE2b-256 hashes, in MB/s. +pub fn benchmark_cpu(limit: ExecutionLimit) -> f64 { // In general the results of this benchmark are somewhat sensitive to how much - // data we hash at the time. The smaller this is the *less* B/s we can hash, - // the bigger this is the *more* B/s we can hash, up until a certain point + // data we hash at the time. The smaller this is the *less* MB/s we can hash, + // the bigger this is the *more* MB/s we can hash, up until a certain point // where we can achieve roughly ~100% of what the hasher can do. 
If we'd plot // this on a graph with the number of bytes we want to hash on the X axis - // and the speed in B/s on the Y axis then we'd essentially see it grow + // and the speed in MB/s on the Y axis then we'd essentially see it grow // logarithmically. // // In practice however we might not always have enough data to hit the maximum @@ -261,12 +156,12 @@ pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput { pub const DEFAULT_MEMORY_EXECUTION_LIMIT: ExecutionLimit = ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(100) }; -// This benchmarks the effective `memcpy` memory bandwidth available in bytes per second. +// This benchmarks the effective `memcpy` memory bandwidth available in MB/s. // // It doesn't technically measure the absolute maximum memory bandwidth available, // but that's fine, because real code most of the time isn't optimized to take // advantage of the full memory bandwidth either. -pub fn benchmark_memory(limit: ExecutionLimit) -> Throughput { +pub fn benchmark_memory(limit: ExecutionLimit) -> f64 { // Ideally this should be at least as big as the CPU's L3 cache, // and it should be big enough so that the `memcpy` takes enough // time to be actually measurable. @@ -358,7 +253,7 @@ pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit = pub fn benchmark_disk_sequential_writes( limit: ExecutionLimit, directory: &Path, -) -> Result { +) -> Result { const SIZE: usize = 64 * 1024 * 1024; let buffer = random_data(SIZE); @@ -400,7 +295,7 @@ pub fn benchmark_disk_sequential_writes( pub fn benchmark_disk_random_writes( limit: ExecutionLimit, directory: &Path, -) -> Result { +) -> Result { const SIZE: usize = 64 * 1024 * 1024; let buffer = random_data(SIZE); @@ -465,9 +360,9 @@ pub fn benchmark_disk_random_writes( /// Benchmarks the verification speed of sr25519 signatures. /// -/// Returns the throughput in B/s by convention. +/// Returns the throughput in MB/s by convention. 
/// The values are rather small (0.4-0.8) so it is advised to convert them into KB/s. -pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput { +pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> f64 { const INPUT_SIZE: usize = 32; const ITERATION_SIZE: usize = 2048; let pair = sr25519::Pair::from_string("//Alice", None).unwrap(); @@ -507,8 +402,8 @@ pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput { pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { #[allow(unused_mut)] let mut hwbench = HwBench { - cpu_hashrate_score: benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT), - memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT), + cpu_hashrate_score: benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) as u64, + memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) as u64, disk_sequential_write_score: None, disk_random_write_score: None, }; @@ -517,7 +412,7 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { hwbench.disk_sequential_write_score = match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) { - Ok(score) => Some(score), + Ok(score) => Some(score as u64), Err(error) => { log::warn!("Failed to run the sequential write disk benchmark: {}", error); None @@ -526,7 +421,7 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { hwbench.disk_random_write_score = match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) { - Ok(score) => Some(score), + Ok(score) => Some(score as u64), Err(error) => { log::warn!("Failed to run the random write disk benchmark: {}", error); None @@ -540,7 +435,6 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { #[cfg(test)] mod tests { use super::*; - use sp_runtime::assert_eq_error_rate_float; #[cfg(target_os = "linux")] #[test] @@ -556,19 +450,19 @@ mod tests { #[test] fn test_benchmark_cpu() { - assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > 
Throughput::from_mibs(0.0)); + assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > 0.0); } #[test] fn test_benchmark_memory() { - assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > Throughput::from_mibs(0.0)); + assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > 0.0); } #[test] fn test_benchmark_disk_sequential_writes() { assert!( benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > - Throughput::from_mibs(0.0) + 0.0 ); } @@ -576,45 +470,12 @@ mod tests { fn test_benchmark_disk_random_writes() { assert!( benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > - Throughput::from_mibs(0.0) + 0.0 ); } #[test] fn test_benchmark_sr25519_verify() { - assert!( - benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > Throughput::from_mibs(0.0) - ); - } - - /// Test the [`Throughput`]. - #[test] - fn throughput_works() { - /// Float precision. - const EPS: f64 = 0.1; - let gib = Throughput::from_gibs(14.324); - - assert_eq_error_rate_float!(14.324, gib.as_gibs(), EPS); - assert_eq_error_rate_float!(14667.776, gib.as_mibs(), EPS); - assert_eq_error_rate_float!(14667.776 * 1024.0, gib.as_kibs(), EPS); - assert_eq!("14.32 GiBs", gib.to_string()); - - let mib = Throughput::from_mibs(1029.0); - assert_eq!("1.00 GiBs", mib.to_string()); - } - - /// Test the [`HwBench`] serialization. - #[test] - fn hwbench_serialize_works() { - let hwbench = HwBench { - cpu_hashrate_score: Throughput::from_gibs(1.32), - memory_memcpy_score: Throughput::from_kibs(9342.432), - disk_sequential_write_score: Some(Throughput::from_kibs(4332.12)), - disk_random_write_score: None, - }; - - let serialized = serde_json::to_string(&hwbench).unwrap(); - // Throughput from all of the benchmarks should be converted to MiBs. 
- assert_eq!(serialized, "{\"cpu_hashrate_score\":1351,\"memory_memcpy_score\":9,\"disk_sequential_write_score\":4}"); + assert!(benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > 0.0); } } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index f8c6f281546db..0d966dacfc2c8 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -16,12 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.19" futures = "0.3.21" -libp2p = { version = "0.49.0", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" -parking_lot = "0.12.1" -pin-project = "1.0.12" +parking_lot = "0.12.0" +pin-project = "1.0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.79" thiserror = "1.0.30" wasm-timer = "0.2.5" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 43fa2d4e52e8a..476e03ee741f3 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -20,8 +20,8 @@ lazy_static = "1.4.0" libc = "0.2.121" log = { version = "0.4.17" } once_cell = "1.8.0" -parking_lot = "0.12.1" -regex = "1.6.0" +parking_lot = "0.12.0" +regex = "1.5.5" rustc-hash = "1.1.0" serde = "1.0.136" thiserror = "1.0.30" diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 63fd1de374cba..2a034f06ce8a8 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -225,7 +225,7 @@ where .ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?; let extrinsics = self .client - .block_body(self.block) + .block_body(&id) .map_err(Error::InvalidBlockId)? 
.ok_or_else(|| Error::MissingBlockComponent("Extrinsics not found".to_string()))?; tracing::debug!(target: "state_tracing", "Found {} extrinsics", extrinsics.len()); diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 978e24df68d78..58941617bfb6a 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -133,14 +133,7 @@ where .add_directive( parse_default_directive("cranelift_wasm=warn").expect("provided directive is valid"), ) - .add_directive(parse_default_directive("hyper=warn").expect("provided directive is valid")) - .add_directive( - parse_default_directive("trust_dns_proto=off").expect("provided directive is valid"), - ) - .add_directive( - parse_default_directive("libp2p_mdns::behaviour::iface=off") - .expect("provided directive is valid"), - ); + .add_directive(parse_default_directive("hyper=warn").expect("provided directive is valid")); if let Ok(lvl) = std::env::var("RUST_LOG") { if lvl != "" { diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 0bdfb623e6c14..3fdcde48d9e22 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -13,14 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } -parking_lot = "0.12.1" +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parking_lot = "0.12.0" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } @@ -35,9 +34,9 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } 
sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } [dev-dependencies] -array-bytes = "4.1" assert_matches = "1.3.0" criterion = "0.3" +hex = "0.4" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index 366d0eb99b945..d34ffe512b023 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -9,13 +9,9 @@ repository = "https://github.com/paritytech/substrate/" description = "Transaction pool client facing API." [dependencies] -async-trait = "0.1.57" futures = "0.3.21" log = "0.4.17" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } - -[dev-dependencies] -serde_json = "1.0" diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index c1e49ad07d7b1..0ebb8f9d4cd9c 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -21,7 +21,6 @@ pub mod error; -use async_trait::async_trait; use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_runtime::{ @@ -109,18 +108,15 @@ pub enum TransactionStatus { Ready, /// The transaction has been broadcast to the given peers. Broadcast(Vec), - /// Transaction has been included in block with given hash - /// at the given position. - #[serde(with = "v1_compatible")] - InBlock((BlockHash, TxIndex)), + /// Transaction has been included in block with given hash. + InBlock(BlockHash), /// The block this transaction was included in has been retracted. 
Retracted(BlockHash), /// Maximum number of finality watchers has been reached, /// old watchers are being removed. FinalityTimeout(BlockHash), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA. - #[serde(with = "v1_compatible")] - Finalized((BlockHash, TxIndex)), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + Finalized(BlockHash), /// Transaction has been replaced in the pool, by another transaction /// that provides the same tags. (e.g. same (sender, nonce)). Usurped(Hash), @@ -147,8 +143,6 @@ pub type TransactionFor

= <

::Block as BlockT>::Extrinsi pub type TransactionStatusStreamFor

= TransactionStatusStream, BlockHash

>; /// Transaction type for a local pool. pub type LocalTransactionFor

= <

::Block as BlockT>::Extrinsic; -/// Transaction's index within the block in which it was included. -pub type TxIndex = usize; /// Typical future type used in transaction pool api. pub type PoolFuture = std::pin::Pin> + Send>>; @@ -304,10 +298,9 @@ pub enum ChainEvent { } /// Trait for transaction pool maintenance. -#[async_trait] pub trait MaintainedTransactionPool: TransactionPool { /// Perform maintenance - async fn maintain(&self, event: ChainEvent); + fn maintain(&self, event: ChainEvent) -> Pin + Send>>; } /// Transaction pool interface for submitting local transactions that exposes a @@ -369,52 +362,3 @@ impl OffchainSubmitTransaction for TP }) } } - -/// Wrapper functions to keep the API backwards compatible over the wire for the old RPC spec. -mod v1_compatible { - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - pub fn serialize(data: &(H, usize), serializer: S) -> Result - where - S: Serializer, - H: Serialize, - { - let (hash, _) = data; - serde::Serialize::serialize(&hash, serializer) - } - - pub fn deserialize<'de, D, H>(deserializer: D) -> Result<(H, usize), D::Error> - where - D: Deserializer<'de>, - H: Deserialize<'de>, - { - let hash: H = serde::Deserialize::deserialize(deserializer)?; - Ok((hash, 0)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn tx_status_compatibility() { - let event: TransactionStatus = TransactionStatus::InBlock((1, 2)); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"inBlock":1}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionStatus = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, TransactionStatus::InBlock((1, 0))); - - let event: TransactionStatus = TransactionStatus::Finalized((1, 2)); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"finalized":1}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionStatus = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, TransactionStatus::Finalized((1, 0))); - } -} 
diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index 602e84b47775c..c3577a45faf07 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -66,7 +66,7 @@ impl ChainApi for TestApi { uxt: ::Extrinsic, ) -> Self::ValidationFuture { let nonce = uxt.transfer().nonce; - let from = uxt.transfer().from; + let from = uxt.transfer().from.clone(); match self.block_id_to_number(at) { Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), @@ -76,7 +76,7 @@ impl ChainApi for TestApi { ready(Ok(Ok(ValidTransaction { priority: 4, requires: if nonce > 1 && self.nonce_dependant { - vec![to_tag(nonce - 1, from)] + vec![to_tag(nonce - 1, from.clone())] } else { vec![] }, @@ -111,7 +111,7 @@ impl ChainApi for TestApi { (blake2_256(&encoded).into(), encoded.len()) } - fn block_body(&self, _id: ::Hash) -> Self::BodyFuture { + fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { ready(Ok(None)) } @@ -121,14 +121,6 @@ impl ChainApi for TestApi { ) -> Result::Header>, Self::Error> { Ok(None) } - - fn tree_route( - &self, - _from: ::Hash, - _to: ::Hash, - ) -> Result, Self::Error> { - unimplemented!() - } } fn uxt(transfer: Transfer) -> Extrinsic { diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index c3f9b50f9482d..4710c96b003cd 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -30,7 +30,6 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{blockchain::HeaderBackend, BlockBackend}; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_blockchain::{HeaderMetadata, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, @@ -112,11 +111,8 @@ impl FullChainApi { impl graph::ChainApi for FullChainApi where Block: BlockT, - Client: ProvideRuntimeApi - + BlockBackend - + 
BlockIdTo - + HeaderBackend - + HeaderMetadata, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -126,8 +122,8 @@ where Pin> + Send>>; type BodyFuture = Ready::Extrinsic>>>>; - fn block_body(&self, hash: Block::Hash) -> Self::BodyFuture { - ready(self.client.block_body(hash).map_err(error::Error::from)) + fn block_body(&self, id: &BlockId) -> Self::BodyFuture { + ready(self.client.block_body(id).map_err(error::Error::from)) } fn validate_transaction( @@ -194,14 +190,6 @@ where ) -> Result::Header>, Self::Error> { self.client.header(*at).map_err(Into::into) } - - fn tree_route( - &self, - from: ::Hash, - to: ::Hash, - ) -> Result, Self::Error> { - sp_blockchain::tree_route::(&*self.client, from, to).map_err(Into::into) - } } /// Helper function to validate a transaction using a full chain API. @@ -214,11 +202,8 @@ fn validate_transaction_blocking( ) -> error::Result where Block: BlockT, - Client: ProvideRuntimeApi - + BlockBackend - + BlockIdTo - + HeaderBackend - + HeaderMetadata, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -279,11 +264,8 @@ where impl FullChainApi where Block: BlockT, - Client: ProvideRuntimeApi - + BlockBackend - + BlockIdTo - + HeaderBackend - + HeaderMetadata, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs deleted file mode 100644 index b347de824fa12..0000000000000 --- a/client/transaction-pool/src/enactment_state.rs +++ /dev/null @@ -1,607 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Substrate transaction pool implementation. - -use sc_transaction_pool_api::ChainEvent; -use sp_blockchain::TreeRoute; -use sp_runtime::traits::Block as BlockT; - -/// Helper struct for keeping track of the current state of processed new best -/// block and finalized events. The main purpose of keeping track of this state -/// is to figure out if a transaction pool enactment is needed or not. 
-/// -/// Given the following chain: -/// -/// B1-C1-D1-E1 -/// / -/// A -/// \ -/// B2-C2-D2-E2 -/// -/// Some scenarios and expected behavior for sequence of `NewBestBlock` (`nbb`) and `Finalized` -/// (`f`) events: -/// -/// - `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(C1))` -/// - `f(C1)`, `nbb(C1)` -> false (enactment was already performed in `f(C1))` -/// - `f(C1)`, `nbb(D2)` -> false (enactment was already performed in `f(C1)`, -/// we should not retract finalized block) -/// - `f(C1)`, `f(C2)`, `nbb(C1)` -> false -/// - `nbb(C1)`, `nbb(C2)` -> true (switching fork is OK) -/// - `nbb(B1)`, `nbb(B2)` -> true -/// - `nbb(B1)`, `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(B1)`) -/// - `nbb(C1)`, `f(B1)` -> false (enactment was already performed in `nbb(B2)`) -pub struct EnactmentState -where - Block: BlockT, -{ - recent_best_block: Block::Hash, - recent_finalized_block: Block::Hash, -} - -impl EnactmentState -where - Block: BlockT, -{ - /// Returns a new `EnactmentState` initialized with the given parameters. - pub fn new(recent_best_block: Block::Hash, recent_finalized_block: Block::Hash) -> Self { - EnactmentState { recent_best_block, recent_finalized_block } - } - - /// Returns the recently finalized block. - pub fn recent_finalized_block(&self) -> Block::Hash { - self.recent_finalized_block - } - - /// Updates the state according to the given `ChainEvent`, returning - /// `Some(tree_route)` with a tree route including the blocks that need to - /// be enacted/retracted. If no enactment is needed then `None` is returned. - pub fn update( - &mut self, - event: &ChainEvent, - tree_route: &F, - ) -> Result>, String> - where - F: Fn(Block::Hash, Block::Hash) -> Result, String>, - { - let (new_hash, finalized) = match event { - ChainEvent::NewBestBlock { hash, .. } => (*hash, false), - ChainEvent::Finalized { hash, .. 
} => (*hash, true), - }; - - // block was already finalized - if self.recent_finalized_block == new_hash { - log::debug!(target: "txpool", "handle_enactment: block already finalized"); - return Ok(None) - } - - // compute actual tree route from best_block to notified block, and use - // it instead of tree_route provided with event - let tree_route = tree_route(self.recent_best_block, new_hash)?; - - log::debug!( - target: "txpool", - "resolve hash:{:?} finalized:{:?} tree_route:{:?} best_block:{:?} finalized_block:{:?}", - new_hash, finalized, tree_route, self.recent_best_block, self.recent_finalized_block - ); - - // check if recently finalized block is on retracted path. this could be - // happening if we first received a finalization event and then a new - // best event for some old stale best head. - if tree_route.retracted().iter().any(|x| x.hash == self.recent_finalized_block) { - log::debug!( - target: "txpool", - "Recently finalized block {} would be retracted by ChainEvent {}, skipping", - self.recent_finalized_block, new_hash - ); - return Ok(None) - } - - if finalized { - self.recent_finalized_block = new_hash; - - // if there are no enacted blocks in best_block -> hash tree_route, - // it means that block being finalized was already enacted (this - // case also covers best_block == new_hash), recent_best_block - // remains valid. - if tree_route.enacted().is_empty() { - log::trace!( - target: "txpool", - "handle_enactment: no newly enacted blocks since recent best block" - ); - return Ok(None) - } - - // otherwise enacted finalized block becomes best block... - } - - self.recent_best_block = new_hash; - - Ok(Some(tree_route)) - } - - /// Forces update of the state according to the given `ChainEvent`. Intended to be used as a - /// fallback when tree_route cannot be computed. - pub fn force_update(&mut self, event: &ChainEvent) { - match event { - ChainEvent::NewBestBlock { hash, .. } => self.recent_best_block = *hash, - ChainEvent::Finalized { hash, .. 
} => self.recent_finalized_block = *hash, - }; - log::debug!(target: "txpool", "forced update: {:?}, {:?}", self.recent_best_block, self.recent_finalized_block); - } -} - -#[cfg(test)] -mod enactment_state_tests { - use super::EnactmentState; - use sc_transaction_pool_api::ChainEvent; - use sp_blockchain::{HashAndNumber, TreeRoute}; - use std::sync::Arc; - use substrate_test_runtime_client::runtime::{Block, Hash}; - - // some helpers for convenient blocks' hash naming - fn a() -> HashAndNumber { - HashAndNumber { number: 1, hash: Hash::from([0xAA; 32]) } - } - fn b1() -> HashAndNumber { - HashAndNumber { number: 2, hash: Hash::from([0xB1; 32]) } - } - fn c1() -> HashAndNumber { - HashAndNumber { number: 3, hash: Hash::from([0xC1; 32]) } - } - fn d1() -> HashAndNumber { - HashAndNumber { number: 4, hash: Hash::from([0xD1; 32]) } - } - fn e1() -> HashAndNumber { - HashAndNumber { number: 5, hash: Hash::from([0xE1; 32]) } - } - fn b2() -> HashAndNumber { - HashAndNumber { number: 2, hash: Hash::from([0xB2; 32]) } - } - fn c2() -> HashAndNumber { - HashAndNumber { number: 3, hash: Hash::from([0xC2; 32]) } - } - fn d2() -> HashAndNumber { - HashAndNumber { number: 4, hash: Hash::from([0xD2; 32]) } - } - fn e2() -> HashAndNumber { - HashAndNumber { number: 5, hash: Hash::from([0xE2; 32]) } - } - - /// mock tree_route computing function for simple two-forks chain - fn tree_route(from: Hash, to: Hash) -> Result, String> { - let chain = vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()]; - let pivot = 4_usize; - - let from = chain - .iter() - .position(|bn| bn.hash == from) - .ok_or("existing block should be given")?; - let to = chain - .iter() - .position(|bn| bn.hash == to) - .ok_or("existing block should be given")?; - - // B1-C1-D1-E1 - // / - // A - // \ - // B2-C2-D2-E2 - // - // [E1 D1 C1 B1 A B2 C2 D2 E2] - - let vec: Vec> = if from < to { - chain.into_iter().skip(from).take(to - from + 1).collect() - } else { - chain.into_iter().skip(to).take(from - to + 
1).rev().collect() - }; - - let pivot = if from <= pivot && to <= pivot { - if from < to { - to - from - } else { - 0 - } - } else if from >= pivot && to >= pivot { - if from < to { - 0 - } else { - from - to - } - } else { - if from < to { - pivot - from - } else { - from - pivot - } - }; - - Ok(TreeRoute::new(vec, pivot)) - } - - mod mock_tree_route_tests { - use super::*; - - /// asserts that tree routes are equal - fn assert_treeroute_eq(expected: TreeRoute, result: TreeRoute) { - assert_eq!(result.common_block().hash, expected.common_block().hash); - assert_eq!(result.enacted().len(), expected.enacted().len()); - assert_eq!(result.retracted().len(), expected.retracted().len()); - assert!(result - .enacted() - .iter() - .zip(expected.enacted().iter()) - .all(|(a, b)| a.hash == b.hash)); - assert!(result - .retracted() - .iter() - .zip(expected.retracted().iter()) - .all(|(a, b)| a.hash == b.hash)); - } - - // some tests for mock tree_route function - #[test] - fn tree_route_mock_test_01() { - let result = tree_route(b1().hash, a().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![b1(), a()], 1); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_02() { - let result = tree_route(a().hash, b1().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![a(), b1()], 0); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_03() { - let result = tree_route(a().hash, c2().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![a(), b2(), c2()], 0); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_04() { - let result = tree_route(e2().hash, a().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a()], 4); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_05() { - let result = tree_route(d1().hash, b1().hash).expect("tree route exists"); - let 
expected = TreeRoute::new(vec![d1(), c1(), b1()], 2); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_06() { - let result = tree_route(d2().hash, b2().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![d2(), c2(), b2()], 2); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_07() { - let result = tree_route(b1().hash, d1().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![b1(), c1(), d1()], 0); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_08() { - let result = tree_route(b2().hash, d2().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![b2(), c2(), d2()], 0); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_09() { - let result = tree_route(e2().hash, e1().hash).expect("tree route exists"); - let expected = - TreeRoute::new(vec![e2(), d2(), c2(), b2(), a(), b1(), c1(), d1(), e1()], 4); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_10() { - let result = tree_route(e1().hash, e2().hash).expect("tree route exists"); - let expected = - TreeRoute::new(vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()], 4); - assert_treeroute_eq(result, expected); - } - #[test] - fn tree_route_mock_test_11() { - let result = tree_route(b1().hash, c2().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![b1(), a(), b2(), c2()], 1); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_12() { - let result = tree_route(d2().hash, b1().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![d2(), c2(), b2(), a(), b1()], 3); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_13() { - let result = tree_route(c2().hash, e1().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![c2(), b2(), a(), b1(), c1(), d1(), e1()], 2); - 
assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_14() { - let result = tree_route(b1().hash, b1().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![b1()], 0); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_15() { - let result = tree_route(b2().hash, b2().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![b2()], 0); - assert_treeroute_eq(result, expected); - } - - #[test] - fn tree_route_mock_test_16() { - let result = tree_route(a().hash, a().hash).expect("tree route exists"); - let expected = TreeRoute::new(vec![a()], 0); - assert_treeroute_eq(result, expected); - } - } - - fn trigger_new_best_block( - state: &mut EnactmentState, - from: HashAndNumber, - acted_on: HashAndNumber, - ) -> bool { - let (from, acted_on) = (from.hash, acted_on.hash); - - let event_tree_route = tree_route(from, acted_on).expect("Tree route exists"); - - state - .update( - &ChainEvent::NewBestBlock { - hash: acted_on, - tree_route: Some(Arc::new(event_tree_route)), - }, - &tree_route, - ) - .unwrap() - .is_some() - } - - fn trigger_finalized( - state: &mut EnactmentState, - from: HashAndNumber, - acted_on: HashAndNumber, - ) -> bool { - let (from, acted_on) = (from.hash, acted_on.hash); - - let v = tree_route(from, acted_on) - .expect("Tree route exists") - .enacted() - .iter() - .map(|h| h.hash) - .collect::>(); - - state - .update(&ChainEvent::Finalized { hash: acted_on, tree_route: v.into() }, &tree_route) - .unwrap() - .is_some() - } - - fn assert_es_eq( - es: &EnactmentState, - expected_best_block: HashAndNumber, - expected_finalized_block: HashAndNumber, - ) { - assert_eq!(es.recent_best_block, expected_best_block.hash); - assert_eq!(es.recent_finalized_block, expected_finalized_block.hash); - } - - #[test] - fn test_enactment_helper() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - // B1-C1-D1-E1 - // / - // A - // \ - // 
B2-C2-D2-E2 - - let result = trigger_new_best_block(&mut es, a(), d1()); - assert!(result); - assert_es_eq(&es, d1(), a()); - - let result = trigger_new_best_block(&mut es, d1(), e1()); - assert!(result); - assert_es_eq(&es, e1(), a()); - - let result = trigger_finalized(&mut es, a(), d2()); - assert!(result); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_new_best_block(&mut es, d2(), e1()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_finalized(&mut es, a(), b2()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_finalized(&mut es, a(), b1()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_new_best_block(&mut es, a(), d2()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_finalized(&mut es, a(), d2()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_new_best_block(&mut es, a(), c2()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_new_best_block(&mut es, a(), c1()); - assert_eq!(result, false); - assert_es_eq(&es, d2(), d2()); - - let result = trigger_new_best_block(&mut es, d2(), e2()); - assert!(result); - assert_es_eq(&es, e2(), d2()); - - let result = trigger_finalized(&mut es, d2(), e2()); - assert_eq!(result, false); - assert_es_eq(&es, e2(), e2()); - } - - #[test] - fn test_enactment_helper_2() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - // A-B1-C1-D1-E1 - - let result = trigger_new_best_block(&mut es, a(), b1()); - assert!(result); - assert_es_eq(&es, b1(), a()); - - let result = trigger_new_best_block(&mut es, b1(), c1()); - assert!(result); - assert_es_eq(&es, c1(), a()); - - let result = trigger_new_best_block(&mut es, c1(), d1()); - assert!(result); - assert_es_eq(&es, d1(), a()); - - let result = trigger_new_best_block(&mut es, d1(), e1()); - 
assert!(result); - assert_es_eq(&es, e1(), a()); - - let result = trigger_finalized(&mut es, a(), c1()); - assert_eq!(result, false); - assert_es_eq(&es, e1(), c1()); - - let result = trigger_finalized(&mut es, c1(), e1()); - assert_eq!(result, false); - assert_es_eq(&es, e1(), e1()); - } - - #[test] - fn test_enactment_helper_3() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - // A-B1-C1-D1-E1 - - let result = trigger_new_best_block(&mut es, a(), e1()); - assert!(result); - assert_es_eq(&es, e1(), a()); - - let result = trigger_finalized(&mut es, a(), b1()); - assert_eq!(result, false); - assert_es_eq(&es, e1(), b1()); - } - - #[test] - fn test_enactment_helper_4() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - // A-B1-C1-D1-E1 - - let result = trigger_finalized(&mut es, a(), e1()); - assert!(result); - assert_es_eq(&es, e1(), e1()); - - let result = trigger_finalized(&mut es, e1(), b1()); - assert_eq!(result, false); - assert_es_eq(&es, e1(), e1()); - } - - #[test] - fn test_enactment_helper_5() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - // B1-C1-D1-E1 - // / - // A - // \ - // B2-C2-D2-E2 - - let result = trigger_finalized(&mut es, a(), e1()); - assert!(result); - assert_es_eq(&es, e1(), e1()); - - let result = trigger_finalized(&mut es, e1(), e2()); - assert_eq!(result, false); - assert_es_eq(&es, e1(), e1()); - } - - #[test] - fn test_enactment_helper_6() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - // A-B1-C1-D1-E1 - - let result = trigger_new_best_block(&mut es, a(), b1()); - assert!(result); - assert_es_eq(&es, b1(), a()); - - let result = trigger_finalized(&mut es, a(), d1()); - assert!(result); - assert_es_eq(&es, d1(), d1()); - - let result = trigger_new_best_block(&mut es, a(), e1()); - assert!(result); - assert_es_eq(&es, e1(), d1()); - - let result = 
trigger_new_best_block(&mut es, a(), c1()); - assert_eq!(result, false); - assert_es_eq(&es, e1(), d1()); - } - - #[test] - fn test_enactment_forced_update_best_block() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - es.force_update(&ChainEvent::NewBestBlock { hash: b1().hash, tree_route: None }); - assert_es_eq(&es, b1(), a()); - } - - #[test] - fn test_enactment_forced_update_finalize() { - sp_tracing::try_init_simple(); - let mut es = EnactmentState::new(a().hash, a().hash); - - es.force_update(&ChainEvent::Finalized { hash: b1().hash, tree_route: Arc::from([]) }); - assert_es_eq(&es, a(), b1()); - } -} diff --git a/client/transaction-pool/src/graph/listener.rs b/client/transaction-pool/src/graph/listener.rs index 776749abf2d5d..d4f42b32fdbb8 100644 --- a/client/transaction-pool/src/graph/listener.rs +++ b/client/transaction-pool/src/graph/listener.rs @@ -104,18 +104,13 @@ impl Listener { /// Transaction was pruned from the pool. pub fn pruned(&mut self, block_hash: BlockHash, tx: &H) { debug!(target: "txpool", "[{:?}] Pruned at {:?}", tx, block_hash); - // Get the transactions included in the given block hash. - let txs = self.finality_watchers.entry(block_hash).or_insert(vec![]); - txs.push(tx.clone()); - // Current transaction is the last one included. 
- let tx_index = txs.len() - 1; - - self.fire(tx, |watcher| watcher.in_block(block_hash, tx_index)); + self.fire(tx, |s| s.in_block(block_hash)); + self.finality_watchers.entry(block_hash).or_insert(vec![]).push(tx.clone()); while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { if let Some((hash, txs)) = self.finality_watchers.pop_front() { for tx in txs { - self.fire(&tx, |watcher| watcher.finality_timeout(hash)); + self.fire(&tx, |s| s.finality_timeout(hash)); } } } @@ -125,7 +120,7 @@ impl Listener { pub fn retracted(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { for hash in hashes { - self.fire(&hash, |watcher| watcher.retracted(block_hash)) + self.fire(&hash, |s| s.retracted(block_hash)) } } } @@ -133,9 +128,9 @@ impl Listener { /// Notify all watchers that transactions have been finalized pub fn finalized(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { - for (tx_index, hash) in hashes.into_iter().enumerate() { + for hash in hashes { log::debug!(target: "txpool", "[{:?}] Sent finalization event (block {:?})", hash, block_hash); - self.fire(&hash, |watcher| watcher.finalized(block_hash, tx_index)) + self.fire(&hash, |s| s.finalized(block_hash)) } } } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 7b3a8db15982a..4ce7954f8d479 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -20,7 +20,6 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use futures::{channel::mpsc::Receiver, Future}; use sc_transaction_pool_api::error; -use sp_blockchain::TreeRoute; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, SaturatedConversion}, @@ -90,21 +89,14 @@ pub trait ChainApi: Send + Sync { /// Returns hash and encoding length of the extrinsic. 
fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (ExtrinsicHash, usize); - /// Returns a block body given the block. - fn block_body(&self, at: ::Hash) -> Self::BodyFuture; + /// Returns a block body given the block id. + fn block_body(&self, at: &BlockId) -> Self::BodyFuture; /// Returns a block header given the block id. fn block_header( &self, at: &BlockId, ) -> Result::Header>, Self::Error>; - - /// Compute a tree-route between two blocks. See [`TreeRoute`] for more details. - fn tree_route( - &self, - from: ::Hash, - to: ::Hash, - ) -> Result, Self::Error>; } /// Pool configuration options. @@ -168,7 +160,7 @@ impl Pool { ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await?; - Ok(self.validated_pool.submit(validated_transactions.into_values())) + Ok(self.validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx))) } /// Resubmit the given extrinsics to the pool. 
@@ -182,7 +174,7 @@ impl Pool { ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await?; - Ok(self.validated_pool.submit(validated_transactions.into_values())) + Ok(self.validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx))) } /// Imports one unverified extrinsic to the pool @@ -349,7 +341,7 @@ impl Pool { at, known_imported_hashes, pruned_hashes, - reverified_transactions.into_values().collect(), + reverified_transactions.into_iter().map(|(_, xt)| xt).collect(), ) } @@ -649,7 +641,7 @@ mod tests { .unwrap(); // when - block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1])).unwrap(); + block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); // then assert!(pool.validated_pool.is_banned(&hash1)); @@ -778,7 +770,7 @@ mod tests { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!( stream.next(), - Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), + Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())), ); } @@ -801,8 +793,12 @@ mod tests { assert_eq!(pool.validated_pool().status().future, 0); // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![*watcher.hash()])) - .unwrap(); + block_on(pool.prune_tags( + &BlockId::Number(2), + vec![vec![0u8]], + vec![watcher.hash().clone()], + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -811,7 +807,7 @@ mod tests { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!( stream.next(), - Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), + Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())), ); } diff --git a/client/transaction-pool/src/graph/rotator.rs b/client/transaction-pool/src/graph/rotator.rs index 47e00a1292155..c91c8e407bc0f 
100644 --- a/client/transaction-pool/src/graph/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -120,7 +120,7 @@ mod tests { let tx = Transaction { data: (), bytes: 1, - hash, + hash: hash.clone(), priority: 5, valid_till: 1, requires: vec![], diff --git a/client/transaction-pool/src/graph/watcher.rs b/client/transaction-pool/src/graph/watcher.rs index 0613300c8684b..8cd78cfc78240 100644 --- a/client/transaction-pool/src/graph/watcher.rs +++ b/client/transaction-pool/src/graph/watcher.rs @@ -84,13 +84,13 @@ impl Sender { } /// Extrinsic has been included in block with given hash. - pub fn in_block(&mut self, hash: BH, index: usize) { - self.send(TransactionStatus::InBlock((hash, index))); + pub fn in_block(&mut self, hash: BH) { + self.send(TransactionStatus::InBlock(hash)); } /// Extrinsic has been finalized by a finality gadget. - pub fn finalized(&mut self, hash: BH, index: usize) { - self.send(TransactionStatus::Finalized((hash, index))); + pub fn finalized(&mut self, hash: BH) { + self.send(TransactionStatus::Finalized(hash)); self.is_finalized = true; } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index a441bf9b2a9a0..7b9ce9d6047c0 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -23,7 +23,6 @@ #![warn(unused_extern_crates)] mod api; -mod enactment_state; pub mod error; mod graph; mod metrics; @@ -32,8 +31,6 @@ mod revalidation; mod tests; pub use crate::api::FullChainApi; -use async_trait::async_trait; -use enactment_state::EnactmentState; use futures::{ channel::oneshot, future::{self, ready}, @@ -65,8 +62,6 @@ use std::time::Instant; use crate::metrics::MetricsLink as PrometheusMetrics; use prometheus_endpoint::Registry as PrometheusRegistry; -use sp_blockchain::{HashAndNumber, TreeRoute}; - type BoxedReadyIterator = Box>> + Send>; @@ -90,7 +85,6 @@ where revalidation_queue: Arc>, ready_poll: Arc, Block>>>, metrics: PrometheusMetrics, - enactment_state: 
Arc>>, } struct ReadyPoll { @@ -169,11 +163,7 @@ where PoolApi: graph::ChainApi + 'static, { /// Create new basic transaction pool with provided api, for tests. - pub fn new_test( - pool_api: Arc, - best_block_hash: Block::Hash, - finalized_hash: Block::Hash, - ) -> (Self, Pin + Send>>) { + pub fn new_test(pool_api: Arc) -> (Self, Pin + Send>>) { let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); @@ -185,10 +175,6 @@ where revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), ready_poll: Default::default(), metrics: Default::default(), - enactment_state: Arc::new(Mutex::new(EnactmentState::new( - best_block_hash, - finalized_hash, - ))), }, background_task, ) @@ -204,8 +190,6 @@ where revalidation_type: RevalidationType, spawner: impl SpawnEssentialNamed, best_block_number: NumberFor, - best_block_hash: Block::Hash, - finalized_hash: Block::Hash, ) -> Self { let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { @@ -233,10 +217,6 @@ where })), ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), metrics: PrometheusMetrics::new(prometheus), - enactment_state: Arc::new(Mutex::new(EnactmentState::new( - best_block_hash, - finalized_hash, - ))), } } @@ -378,7 +358,6 @@ where + sp_runtime::traits::BlockIdTo + sc_client_api::ExecutorProvider + sc_client_api::UsageProvider - + sp_blockchain::HeaderMetadata + Send + Sync + 'static, @@ -401,8 +380,6 @@ where RevalidationType::Full, spawner, client.usage_info().chain.best_number, - client.usage_info().chain.best_hash, - client.usage_info().chain.finalized_hash, )); // make transaction pool available for off-chain runtime calls. 
@@ -419,8 +396,7 @@ where Client: sp_api::ProvideRuntimeApi + sc_client_api::BlockBackend + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo - + sp_blockchain::HeaderMetadata, + + sp_runtime::traits::BlockIdTo, Client: Send + Sync + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, { @@ -550,12 +526,12 @@ impl RevalidationStatus { /// Prune the known txs for the given block. async fn prune_known_txs_for_block>( - block_hash: Block::Hash, + block_id: BlockId, api: &Api, pool: &graph::Pool, ) -> Vec> { let extrinsics = api - .block_body(block_hash) + .block_body(&block_id) .await .unwrap_or_else(|e| { log::warn!("Prune known transactions: error request: {}", e); @@ -567,21 +543,19 @@ async fn prune_known_txs_for_block h, Ok(None) => { - log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); + log::debug!(target: "txpool", "Could not find header for {:?}.", block_id); return hashes }, Err(e) => { - log::debug!(target: "txpool", "Error retrieving header for {:?}: {}", block_hash, e); + log::debug!(target: "txpool", "Error retrieving header for {:?}: {}", block_id, e); return hashes }, }; - if let Err(e) = pool - .prune(&BlockId::Hash(block_hash), &BlockId::hash(*header.parent_hash()), &extrinsics) - .await + if let Err(e) = pool.prune(&block_id, &BlockId::hash(*header.parent_hash()), &extrinsics).await { log::error!("Cannot prune known in the pool: {}", e); } @@ -589,190 +563,166 @@ async fn prune_known_txs_for_block BasicPool +impl MaintainedTransactionPool for BasicPool where Block: BlockT, PoolApi: 'static + graph::ChainApi, { - /// Handles enactment and retraction of blocks, prunes stale transactions - /// (that have already been enacted) and resubmits transactions that were - /// retracted. 
- async fn handle_enactment(&self, tree_route: TreeRoute) { - log::trace!(target: "txpool", "handle_enactment tree_route: {tree_route:?}"); - let pool = self.pool.clone(); - let api = self.api.clone(); - - let (hash, block_number) = match tree_route.last() { - Some(HashAndNumber { hash, number }) => (hash, number), - None => { - log::warn!( - target: "txpool", - "Skipping ChainEvent - no last block in tree route {:?}", - tree_route, - ); - return - }, - }; - - let next_action = self.revalidation_strategy.lock().next( - *block_number, - Some(std::time::Duration::from_secs(60)), - Some(20u32.into()), - ); - - // We keep track of everything we prune so that later we won't add - // transactions with those hashes from the retracted blocks. - let mut pruned_log = HashSet::>::new(); - - // If there is a tree route, we use this to prune known tx based on the enacted - // blocks. Before pruning enacted transactions, we inform the listeners about - // retracted blocks and their transactions. This order is important, because - // if we enact and retract the same transaction at the same time, we want to - // send first the retract and than the prune event. 
- for retracted in tree_route.retracted() { - // notify txs awaiting finality that it has been retracted - pool.validated_pool().on_block_retracted(retracted.hash); - } - - future::join_all( - tree_route - .enacted() - .iter() - .map(|h| prune_known_txs_for_block(h.hash, &*api, &*pool)), - ) - .await - .into_iter() - .for_each(|enacted_log| { - pruned_log.extend(enacted_log); - }); - - self.metrics - .report(|metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64)); - - if next_action.resubmit { - let mut resubmit_transactions = Vec::new(); - - for retracted in tree_route.retracted() { - let hash = retracted.hash; - - let block_transactions = api - .block_body(hash) - .await - .unwrap_or_else(|e| { - log::warn!("Failed to fetch block body: {}", e); - None - }) - .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - - let mut resubmitted_to_report = 0; - - resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| { - let tx_hash = pool.hash_of(tx); - let contains = pruned_log.contains(&tx_hash); - - // need to count all transactions, not just filtered, here - resubmitted_to_report += 1; - - if !contains { - log::debug!( + fn maintain(&self, event: ChainEvent) -> Pin + Send>> { + match event { + ChainEvent::NewBestBlock { hash, tree_route } => { + let pool = self.pool.clone(); + let api = self.api.clone(); + + let id = BlockId::hash(hash); + let block_number = match api.block_id_to_number(&id) { + Ok(Some(number)) => number, + _ => { + log::trace!( target: "txpool", - "[{:?}]: Resubmitting from retracted block {:?}", - tx_hash, - hash, + "Skipping chain event - no number for that block {:?}", + id, ); + return Box::pin(ready(())) + }, + }; + + let next_action = self.revalidation_strategy.lock().next( + block_number, + Some(std::time::Duration::from_secs(60)), + Some(20u32.into()), + ); + let revalidation_strategy = self.revalidation_strategy.clone(); + let revalidation_queue = 
self.revalidation_queue.clone(); + let ready_poll = self.ready_poll.clone(); + let metrics = self.metrics.clone(); + + async move { + // We keep track of everything we prune so that later we won't add + // transactions with those hashes from the retracted blocks. + let mut pruned_log = HashSet::>::new(); + + // If there is a tree route, we use this to prune known tx based on the enacted + // blocks. Before pruning enacted transactions, we inform the listeners about + // retracted blocks and their transactions. This order is important, because + // if we enact and retract the same transaction at the same time, we want to + // send first the retract and than the prune event. + if let Some(ref tree_route) = tree_route { + for retracted in tree_route.retracted() { + // notify txs awaiting finality that it has been retracted + pool.validated_pool().on_block_retracted(retracted.hash); + } + + future::join_all(tree_route.enacted().iter().map(|h| { + prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool) + })) + .await + .into_iter() + .for_each(|enacted_log| { + pruned_log.extend(enacted_log); + }) } - !contains - })); - self.metrics.report(|metrics| { - metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) - }); - } - - if let Err(e) = pool - .resubmit_at( - &BlockId::Hash(*hash), - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ) - .await - { - log::debug!( - target: "txpool", - "[{:?}] Error re-submitting transactions: {}", - hash, - e, - ) - } - } - - let extra_pool = pool.clone(); - // After #5200 lands, this arguably might be moved to the - // handler of "all blocks notification". 
- self.ready_poll - .lock() - .trigger(*block_number, move || Box::new(extra_pool.validated_pool().ready())); + pruned_log.extend(prune_known_txs_for_block(id, &*api, &*pool).await); + + metrics.report(|metrics| { + metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) + }); + + if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) { + let mut resubmit_transactions = Vec::new(); + + for retracted in tree_route.retracted() { + let hash = retracted.hash; + + let block_transactions = api + .block_body(&BlockId::hash(hash)) + .await + .unwrap_or_else(|e| { + log::warn!("Failed to fetch block body: {}", e); + None + }) + .unwrap_or_default() + .into_iter() + .filter(|tx| tx.is_signed().unwrap_or(true)); + + let mut resubmitted_to_report = 0; + + resubmit_transactions.extend(block_transactions.into_iter().filter( + |tx| { + let tx_hash = pool.hash_of(tx); + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + log::debug!( + target: "txpool", + "[{:?}]: Resubmitting from retracted block {:?}", + tx_hash, + hash, + ); + } + !contains + }, + )); + + metrics.report(|metrics| { + metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) + }); + } + + if let Err(e) = pool + .resubmit_at( + &id, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await + { + log::debug!( + target: "txpool", + "[{:?}] Error re-submitting transactions: {}", + id, + e, + ) + } + } - if next_action.revalidate { - let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); - self.revalidation_queue.revalidate_later(*block_number, hashes).await; + let extra_pool = pool.clone(); + // After #5200 lands, this arguably might be moved to the + // handler of "all blocks notification". 
+ ready_poll.lock().trigger(block_number, move || { + Box::new(extra_pool.validated_pool().ready()) + }); - self.revalidation_strategy.lock().clear(); - } - } -} + if next_action.revalidate { + let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); + revalidation_queue.revalidate_later(block_number, hashes).await; -#[async_trait] -impl MaintainedTransactionPool for BasicPool -where - Block: BlockT, - PoolApi: 'static + graph::ChainApi, -{ - async fn maintain(&self, event: ChainEvent) { - let prev_finalized_block = self.enactment_state.lock().recent_finalized_block(); - let compute_tree_route = |from, to| -> Result, String> { - match self.api.tree_route(from, to) { - Ok(tree_route) => Ok(tree_route), - Err(e) => - return Err(format!( - "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" - )), - } - }; - - let result = self.enactment_state.lock().update(&event, &compute_tree_route); - - match result { - Err(msg) => { - log::debug!(target: "txpool", "{msg}"); - self.enactment_state.lock().force_update(&event); - }, - Ok(None) => {}, - Ok(Some(tree_route)) => { - self.handle_enactment(tree_route).await; + revalidation_strategy.lock().clear(); + } + } + .boxed() }, - }; - - if let ChainEvent::Finalized { hash, tree_route } = event { - log::trace!( - target: "txpool", - "on-finalized enacted: {tree_route:?}, previously finalized: \ - {prev_finalized_block:?}", - ); - - for hash in tree_route.iter().chain(std::iter::once(&hash)) { - if let Err(e) = self.pool.validated_pool().on_block_finalized(*hash).await { - log::warn!( - target: "txpool", - "Error occurred while attempting to notify watchers about finalization {}: {}", - hash, e - ) + ChainEvent::Finalized { hash, tree_route } => { + let pool = self.pool.clone(); + async move { + for hash in tree_route.iter().chain(&[hash]) { + if let Err(e) = pool.validated_pool().on_block_finalized(*hash).await { + log::warn!( + target: "txpool", + "Error [{}] occurred while attempting to notify 
watchers of finalization {}", + e, hash + ) + } + } } - } + .boxed() + }, } } } diff --git a/client/transaction-pool/src/tests.rs b/client/transaction-pool/src/tests.rs index d8732077eba52..79142e16a1b36 100644 --- a/client/transaction-pool/src/tests.rs +++ b/client/transaction-pool/src/tests.rs @@ -22,7 +22,6 @@ use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool}; use codec::Encode; use parking_lot::Mutex; use sc_transaction_pool_api::error; -use sp_blockchain::TreeRoute; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Hash}, @@ -164,7 +163,7 @@ impl ChainApi for TestApi { (Hashing::hash(&encoded), len) } - fn block_body(&self, _id: ::Hash) -> Self::BodyFuture { + fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { futures::future::ready(Ok(None)) } @@ -174,14 +173,6 @@ impl ChainApi for TestApi { ) -> Result::Header>, Self::Error> { Ok(None) } - - fn tree_route( - &self, - _from: ::Hash, - _to: ::Hash, - ) -> Result, Self::Error> { - unimplemented!() - } } pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 27891432753a4..d6ea5ab8c0625 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -30,14 +30,13 @@ use sc_transaction_pool::*; use sc_transaction_pool_api::{ ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; -use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, traits::Block as _, transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; -use std::{collections::BTreeSet, pin::Pin, sync::Arc}; +use std::{collections::BTreeSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block, Extrinsic, Hash, Header, Index, Transfer}, AccountKeyring::*, @@ -51,32 +50,13 @@ fn pool() -> Pool { fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { let api = 
Arc::new(TestApi::with_alice_nonce(209)); - let (pool, background_task) = create_basic_pool_with_genesis(api.clone()); + let (pool, background_task) = BasicPool::new_test(api.clone()); let thread_pool = futures::executor::ThreadPool::new().unwrap(); thread_pool.spawn_ok(background_task); (pool, api, thread_pool) } -fn create_basic_pool_with_genesis( - test_api: Arc, -) -> (BasicPool, Pin + Send>>) { - let genesis_hash = { - test_api - .chain() - .read() - .block_by_number - .get(&0) - .map(|blocks| blocks[0].0.header.hash()) - .expect("there is block 0. qed") - }; - BasicPool::new_test(test_api, genesis_hash, genesis_hash) -} - -fn create_basic_pool(test_api: TestApi) -> BasicPool { - create_basic_pool_with_genesis(Arc::from(test_api)).0 -} - const SOURCE: TransactionSource = TransactionSource::External; #[test] @@ -348,7 +328,7 @@ fn should_revalidate_across_many_blocks() { block_on( watcher1 - .take_while(|s| future::ready(*s != TransactionStatus::InBlock((block_hash, 0)))) + .take_while(|s| future::ready(*s != TransactionStatus::InBlock(block_hash))) .collect::>(), ); @@ -407,7 +387,7 @@ fn should_push_watchers_during_maintenance() { let header_hash = header.hash(); block_on(pool.maintain(block_event(header))); - let event = ChainEvent::Finalized { hash: header_hash, tree_route: Arc::from(vec![]) }; + let event = ChainEvent::Finalized { hash: header_hash.clone(), tree_route: Arc::from(vec![]) }; block_on(pool.maintain(event)); // then @@ -418,24 +398,24 @@ fn should_push_watchers_during_maintenance() { futures::executor::block_on_stream(watcher0).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock((header_hash, 0)), - TransactionStatus::Finalized((header_hash, 0)) + TransactionStatus::InBlock(header_hash.clone()), + TransactionStatus::Finalized(header_hash.clone()) ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock((header_hash, 1)), - 
TransactionStatus::Finalized((header_hash, 1)) + TransactionStatus::InBlock(header_hash.clone()), + TransactionStatus::Finalized(header_hash.clone()) ], ); assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock((header_hash, 2)), - TransactionStatus::Finalized((header_hash, 2)) + TransactionStatus::InBlock(header_hash.clone()), + TransactionStatus::Finalized(header_hash.clone()) ], ); } @@ -456,7 +436,7 @@ fn finalization() { let xt = uxt(Alice, 209); let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); - let pool = create_basic_pool(api); + let (pool, _background) = BasicPool::new_test(api.into()); let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) .expect("1. Imported"); pool.api().push_block(2, vec![xt.clone()], true); @@ -470,8 +450,8 @@ fn finalization() { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); assert_eq!(stream.next(), None); } @@ -479,9 +459,9 @@ fn finalization() { fn fork_aware_finalization() { let api = TestApi::empty(); // starting block A1 (last finalized.) 
- let a_header = api.push_block(1, vec![], true); + api.push_block(1, vec![], true); - let pool = create_basic_pool(api); + let (pool, _background) = BasicPool::new_test(api.into()); let mut canon_watchers = vec![]; let from_alice = uxt(Alice, 1); @@ -496,13 +476,10 @@ fn fork_aware_finalization() { let from_dave_watcher; let from_bob_watcher; let b1; - let c1; let d1; let c2; let d2; - block_on(pool.maintain(block_event(a_header))); - // block B1 { let watcher = @@ -512,7 +489,6 @@ fn fork_aware_finalization() { canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); - log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; b1 = header.hash(); block_on(pool.maintain(event)); @@ -528,7 +504,6 @@ fn fork_aware_finalization() { block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) .expect("1. Imported"); assert_eq!(pool.status().ready, 1); - log::trace!(target:"txpool", ">> C2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; c2 = header.hash(); block_on(pool.maintain(event)); @@ -543,7 +518,6 @@ fn fork_aware_finalization() { assert_eq!(pool.status().ready, 1); let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); - log::trace!(target:"txpool", ">> D2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d2 = header.hash(); block_on(pool.maintain(event)); @@ -556,9 +530,8 @@ fn fork_aware_finalization() { block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) .expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block_with_parent(b1, vec![from_charlie.clone()], true); - log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); - c1 = header.hash(); + let header = 
pool.api().push_block(3, vec![from_charlie.clone()], true); + canon_watchers.push((watcher, header.hash())); let event = block_event_with_retracted(header.clone(), d2, pool.api()); block_on(pool.maintain(event)); @@ -574,12 +547,11 @@ fn fork_aware_finalization() { let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) .expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api().push_block_with_parent(c1, vec![xt.clone()], true); - log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header); - d1 = header.hash(); + let header = pool.api().push_block(4, vec![xt.clone()], true); canon_watchers.push((w, header.hash())); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; + d1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); let event = ChainEvent::Finalized { hash: d1, tree_route: Arc::from(vec![]) }; @@ -588,10 +560,9 @@ fn fork_aware_finalization() { let e1; - // block E1 + // block e1 { - let header = pool.api().push_block_with_parent(d1, vec![from_dave, from_bob], true); - log::trace!(target:"txpool", ">> E1: {:?} {:?}", header.hash(), header); + let header = pool.api().push_block(5, vec![from_dave, from_bob], true); e1 = header.hash(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); @@ -602,31 +573,30 @@ fn fork_aware_finalization() { for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((h, 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((h, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); assert_eq!(stream.next(), None); } { let mut stream = 
futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c2, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2.clone()))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((e1, 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((e1, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((d2, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2.clone()))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - // In block e1 we submitted: [dave, bob] xts in this order. - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((e1, 1)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((e1, 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); assert_eq!(stream.next(), None); } } @@ -639,7 +609,7 @@ fn prune_and_retract_tx_at_same_time() { // starting block A1 (last finalized.) 
api.push_block(1, vec![], true); - let pool = create_basic_pool(api); + let (pool, _background) = BasicPool::new_test(api.into()); let from_alice = uxt(Alice, 1); pool.api().increment_nonce(Alice.into()); @@ -676,10 +646,10 @@ fn prune_and_retract_tx_at_same_time() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1.clone()))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2, 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2))); assert_eq!(stream.next(), None); } } @@ -705,7 +675,7 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // starting block A1 (last finalized.) api.push_block(1, vec![], true); - let pool = create_basic_pool(api); + let (pool, _background) = BasicPool::new_test(api.into()); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); @@ -750,7 +720,7 @@ fn resubmit_from_retracted_fork() { // starting block A1 (last finalized.) api.push_block(1, vec![], true); - let pool = create_basic_pool(api); + let (pool, _background) = BasicPool::new_test(api.into()); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); @@ -811,7 +781,7 @@ fn resubmit_from_retracted_fork() { let e1 = { let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone())) .expect("1. 
Imported"); - let header = pool.api().push_block_with_parent(d1, vec![tx4.clone()], true); + let header = pool.api().push_block_with_parent(d1.clone(), vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; @@ -820,7 +790,7 @@ fn resubmit_from_retracted_fork() { let f1_header = { let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) .expect("1. Imported"); - let header = pool.api().push_block_with_parent(e1, vec![tx5.clone()], true); + let header = pool.api().push_block_with_parent(e1.clone(), vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. assert_eq!(pool.status().ready, 3); @@ -895,14 +865,13 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { #[test] fn should_not_accept_old_signatures() { let client = Arc::new(substrate_test_runtime_client::new()); - let best_hash = client.info().best_hash; - let finalized_hash = client.info().finalized_hash; + let pool = Arc::new( - BasicPool::new_test( - Arc::new(FullChainApi::new(client, None, &sp_core::testing::TaskExecutor::new())), - best_hash, - finalized_hash, - ) + BasicPool::new_test(Arc::new(FullChainApi::new( + client, + None, + &sp_core::testing::TaskExecutor::new(), + ))) .0, ); @@ -911,7 +880,7 @@ fn should_not_accept_old_signatures() { // generated with schnorrkel 0.1.1 from `_bytes` let old_singature = sp_core::sr25519::Signature::try_from( - &array_bytes::hex2bytes( + &hex::decode( "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426\ cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108", ) @@ -938,19 +907,12 @@ fn should_not_accept_old_signatures() { fn import_notification_to_pool_maintain_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); - let best_hash = client.info().best_hash; - let finalized_hash = client.info().finalized_hash; - let pool = Arc::new( - BasicPool::new_test( - Arc::new(FullChainApi::new( - 
client.clone(), - None, - &sp_core::testing::TaskExecutor::new(), - )), - best_hash, - finalized_hash, - ) + BasicPool::new_test(Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + ))) .0, ); @@ -1035,540 +997,3 @@ fn stale_transactions_are_pruned() { assert_eq!(pool.status().future, 0); assert_eq!(pool.status().ready, 0); } - -#[test] -fn finalized_only_handled_correctly() { - sp_tracing::try_init_simple(); - let xt = uxt(Alice, 209); - - let (pool, api, _guard) = maintained_pool(); - - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. Imported"); - assert_eq!(pool.status().ready, 1); - - let header = api.push_block(1, vec![xt], false); - - let event = - ChainEvent::Finalized { hash: header.clone().hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - - assert_eq!(pool.status().ready, 0); - - { - let mut stream = futures::executor::block_on_stream(watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.clone().hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); - assert_eq!(stream.next(), None); - } -} - -#[test] -fn best_block_after_finalized_handled_correctly() { - sp_tracing::try_init_simple(); - let xt = uxt(Alice, 209); - - let (pool, api, _guard) = maintained_pool(); - - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. 
Imported"); - assert_eq!(pool.status().ready, 1); - - let header = api.push_block(1, vec![xt], true); - - let event = - ChainEvent::Finalized { hash: header.clone().hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - block_on(pool.maintain(block_event(header.clone()))); - - assert_eq!(pool.status().ready, 0); - - { - let mut stream = futures::executor::block_on_stream(watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.clone().hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); - assert_eq!(stream.next(), None); - } -} - -#[test] -fn switching_fork_with_finalized_works() { - sp_tracing::try_init_simple(); - let api = TestApi::empty(); - // starting block A1 (last finalized.) - let a_header = api.push_block(1, vec![], true); - - let pool = create_basic_pool(api); - - let from_alice = uxt(Alice, 1); - let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - - let from_alice_watcher; - let from_bob_watcher; - let b1_header; - let b2_header; - - // block B1 - { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); - assert_eq!(pool.status().ready, 1); - log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); - b1_header = header; - } - - // block B2 - { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. 
Imported"); - let header = pool.api().push_block_with_parent( - a_header.hash(), - vec![from_alice.clone(), from_bob.clone()], - true, - ); - assert_eq!(pool.status().ready, 2); - - log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header); - b2_header = header; - } - - { - let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 1); - } - - { - let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - } - - { - let mut stream = futures::executor::block_on_stream(from_alice_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } - - { - let mut stream = futures::executor::block_on_stream(from_bob_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 1)))); - assert_eq!(stream.next(), None); - } -} - -#[test] -fn switching_fork_multiple_times_works() { - sp_tracing::try_init_simple(); - let api = TestApi::empty(); - // starting block A1 (last finalized.) 
- let a_header = api.push_block(1, vec![], true); - - let pool = create_basic_pool(api); - - let from_alice = uxt(Alice, 1); - let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - - let from_alice_watcher; - let from_bob_watcher; - let b1_header; - let b2_header; - - // block B1 - { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); - assert_eq!(pool.status().ready, 1); - log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); - b1_header = header; - } - - // block B2 - { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent( - a_header.hash(), - vec![from_alice.clone(), from_bob.clone()], - true, - ); - assert_eq!(pool.status().ready, 2); - - log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header); - b2_header = header; - } - - { - // phase-0 - let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 1); - } - - { - // phase-1 - let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), pool.api()); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - } - - { - // phase-2 - let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), pool.api()); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 1); - } - - { - // phase-3 - let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - } - - { - let mut stream = futures::executor::block_on_stream(from_alice_watcher); - //phase-0 - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - 
assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); - //phase-1 - assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); - //phase-2 - assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); - //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } - - { - let mut stream = futures::executor::block_on_stream(from_bob_watcher); - //phase-1 - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); - //phase-2 - assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 1)))); - assert_eq!(stream.next(), None); - } -} - -#[test] -fn two_blocks_delayed_finalization_works() { - sp_tracing::try_init_simple(); - let api = TestApi::empty(); - // starting block A1 (last finalized.) 
- let a_header = api.push_block(1, vec![], true); - - let pool = create_basic_pool(api); - - let from_alice = uxt(Alice, 1); - let from_bob = uxt(Bob, 2); - let from_charlie = uxt(Charlie, 3); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - pool.api().increment_nonce(Charlie.into()); - - let from_alice_watcher; - let from_bob_watcher; - let from_charlie_watcher; - let b1_header; - let c1_header; - let d1_header; - - // block B1 - { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); - assert_eq!(pool.status().ready, 1); - - log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); - b1_header = header; - } - - // block C1 - { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); - assert_eq!(pool.status().ready, 2); - - log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); - c1_header = header; - } - - // block D1 - { - from_charlie_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) - .expect("1. 
Imported"); - let header = - pool.api() - .push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); - assert_eq!(pool.status().ready, 3); - - log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header); - d1_header = header; - } - - { - let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 3); - } - - { - let event = ChainEvent::NewBestBlock { hash: d1_header.hash(), tree_route: None }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - } - - { - let event = ChainEvent::Finalized { - hash: c1_header.hash(), - tree_route: Arc::from(vec![b1_header.hash()]), - }; - block_on(pool.maintain(event)); - } - - // this is to collect events from_charlie_watcher and make sure nothing was retracted - { - let event = ChainEvent::Finalized { hash: d1_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - } - - { - let mut stream = futures::executor::block_on_stream(from_alice_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } - - { - let mut stream = futures::executor::block_on_stream(from_bob_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } - - { - let mut stream = futures::executor::block_on_stream(from_charlie_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((d1_header.hash(), 0)))); - assert_eq!(stream.next(), 
Some(TransactionStatus::Finalized((d1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } -} - -#[test] -fn delayed_finalization_does_not_retract() { - sp_tracing::try_init_simple(); - let api = TestApi::empty(); - // starting block A1 (last finalized.) - let a_header = api.push_block(1, vec![], true); - - let pool = create_basic_pool(api); - - let from_alice = uxt(Alice, 1); - let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - - let from_alice_watcher; - let from_bob_watcher; - let b1_header; - let c1_header; - - // block B1 - { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); - assert_eq!(pool.status().ready, 1); - - log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); - b1_header = header; - } - - // block C1 - { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. 
Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); - assert_eq!(pool.status().ready, 2); - - log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); - c1_header = header; - } - - { - // phase-0 - let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 1); - } - - { - // phase-1 - let event = ChainEvent::NewBestBlock { hash: c1_header.hash(), tree_route: None }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - } - - { - // phase-2 - let event = ChainEvent::Finalized { hash: b1_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - } - - { - // phase-3 - let event = ChainEvent::Finalized { hash: c1_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - } - - { - let mut stream = futures::executor::block_on_stream(from_alice_watcher); - //phase-0 - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); - //phase-2 - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } - - { - let mut stream = futures::executor::block_on_stream(from_bob_watcher); - //phase-0 - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - //phase-1 - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); - //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } -} - -#[test] -fn best_block_after_finalization_does_not_retract() { - sp_tracing::try_init_simple(); - let api = TestApi::empty(); - // starting block A1 (last finalized.) 
- let a_header = api.push_block(1, vec![], true); - - let pool = create_basic_pool(api); - - let from_alice = uxt(Alice, 1); - let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - - let from_alice_watcher; - let from_bob_watcher; - let b1_header; - let c1_header; - - // block B1 - { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); - assert_eq!(pool.status().ready, 1); - - log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); - b1_header = header; - } - - // block C1 - { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); - assert_eq!(pool.status().ready, 2); - - log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); - c1_header = header; - } - - { - let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) }; - block_on(pool.maintain(event)); - } - - { - let event = ChainEvent::Finalized { - hash: c1_header.hash(), - tree_route: Arc::from(vec![a_header.hash(), b1_header.hash()]), - }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - } - - { - let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; - block_on(pool.maintain(event)); - } - - { - let mut stream = futures::executor::block_on_stream(from_alice_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } - - { - let mut stream = 
futures::executor::block_on_stream(from_bob_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); - assert_eq!(stream.next(), None); - } -} diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml index 082ac3b55e80d..2df04be7fb4af 100644 --- a/client/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.21" futures-timer = "3.0.2" lazy_static = "1.4.0" log = "0.4" -parking_lot = "0.12.1" +parking_lot = "0.12.0" prometheus = { version = "0.13.0", default-features = false } [features] diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 25f8e582c78fc..c867a245739ff 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -360,7 +360,7 @@ Runtime ------- * Introduce stacked filtering (#6273) -* Allow "pure" proxied accounts (#6236) +* Allow "anonymous" proxied accounts (#6236) * Allow over-weight collective proposals to be closed (#6163) * Fix Election when ForceNone V1 (#6166) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 133ba7b094d43..0b9e6e7783058 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -24,19 +24,8 @@ /.gitlab-ci.yml @paritytech/ci # Sandboxing capability of Substrate Runtime -/primitives/sandbox/ @pepyakin @koute - -# WASM executor, low-level client <-> WASM interface and other WASM-related code -/client/executor/ @koute -/client/allocator/ @koute -/primitives/wasm-interface/ @koute -/primitives/runtime-interface/ @koute -/primitives/panic-handler/ @koute -/utils/wasm-builder/ @koute - -# Systems-related bits and bobs on the client side -/client/sysinfo/ @koute -/client/tracing/ @koute +/primitives/sr-sandbox/ @pepyakin +/primitives/core/src/sandbox.rs @pepyakin # GRANDPA, BABE, consensus stuff /frame/babe/ @andresilva @@ -47,13 +36,11 @@ /client/consensus/pow/ @sorpaas /primitives/consensus/pow/ @sorpaas -# BEEFY, 
MMR +# BEEFY /client/beefy/ @acatangiu /frame/beefy/ @acatangiu /frame/beefy-mmr/ @acatangiu -/frame/merkle-mountain-range/ @acatangiu /primitives/beefy/ @acatangiu -/primitives/merkle-mountain-range/ @acatangiu # Contracts /frame/contracts/ @athei diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md index 7540e0d5b5b8c..f750c6dd5865b 100644 --- a/docs/Upgrading-2.0-to-3.0.md +++ b/docs/Upgrading-2.0-to-3.0.md @@ -100,12 +100,12 @@ And update the overall definition for weights on frame and a few related types a +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. -+const MAXIMUM_BLOCK_WEIGHT: Weight = 2u64 * WEIGHT_PER_SECOND; ++const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// We allow for 2 seconds of compute with a 6 second average block time. -- pub const MaximumBlockWeight: Weight = 2u64 * WEIGHT_PER_SECOND; +- pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. 
- pub MaximumExtrinsicWeight: Weight = @@ -147,8 +147,8 @@ And update the overall definition for weights on frame and a few related types a + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = Index; @@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; diff --git a/frame/alliance/Cargo.toml b/frame/alliance/Cargo.toml index 399822a2215f5..706827708ce88 100644 --- a/frame/alliance/Cargo.toml +++ b/frame/alliance/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "4.1", optional = true } +hex = { version = "0.4", default-features = false, features = ["alloc"], optional = true } sha2 = { version = "0.10.1", default-features = false, optional = true } log = { version = "0.4.14", default-features = false } @@ -33,7 +33,7 @@ pallet-identity = { version = "4.0.0-dev", path = "../identity", default-feature pallet-collective = { version = "4.0.0-dev", path = "../collective", default-features = false, optional = true } [dev-dependencies] -array-bytes = "4.1" +hex-literal = "0.3.1" sha2 = "0.10.1" pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-collective = { version = "4.0.0-dev", path = "../collective" } @@ -41,8 +41,6 @@ pallet-collective = { version = "4.0.0-dev", path = "../collective" } [features] default = ["std"] std = [ - "pallet-collective?/std", - "frame-benchmarking?/std", "log/std", "codec/std", "scale-info/std", @@ -55,9 +53,9 @@ std = [ "pallet-identity/std", ] runtime-benchmarks = [ - "array-bytes", + "hex", "sha2", - "frame-benchmarking/runtime-benchmarks", + "frame-benchmarking", "sp-runtime/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/frame/alliance/README.md 
b/frame/alliance/README.md index dff9e0a47aa2c..f0900c84cbd85 100644 --- a/frame/alliance/README.md +++ b/frame/alliance/README.md @@ -43,7 +43,6 @@ to update the Alliance's rule and make announcements. #### For Members (All) -- `give_retirement_notice` - Give a retirement notice and start a retirement period required to pass in order to retire. - `retire` - Retire from the Alliance and release the caller's deposit. #### For Members (Founders/Fellows) @@ -66,5 +65,4 @@ to update the Alliance's rule and make announcements. #### Root Calls -- `init_members` - Initialize the Alliance, onboard founders, fellows, and allies. -- `disband` - Disband the Alliance, remove all active members and unreserve deposits. +- `init_founders` - Initialize the founding members. diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index e2e1579fcc9b4..527c35b58a5d8 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -19,7 +19,6 @@ use sp_runtime::traits::{Bounded, Hash, StaticLookup}; use sp_std::{ - cmp, convert::{TryFrom, TryInto}, mem::size_of, prelude::*, @@ -35,7 +34,7 @@ const SEED: u32 = 0; const MAX_BYTES: u32 = 1_024; -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -122,12 +121,7 @@ benchmarks_instance_pallet! { let proposer = founders[0].clone(); let fellows = (0 .. y).map(fellow::).collect::>(); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; let threshold = m; // Add previous proposals. @@ -173,12 +167,7 @@ benchmarks_instance_pallet! 
{ members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; // Threshold is 1 less than the number of members so that one person can vote nay let threshold = m - 1; @@ -241,12 +230,7 @@ benchmarks_instance_pallet! { let founders = (0 .. m).map(founder::).collect::>(); let vetor = founders[0].clone(); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - vec![], - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, vec![], vec![])?; // Threshold is one less than total members so that two nays will disapprove the vote let threshold = m - 1; @@ -292,12 +276,7 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -353,7 +332,7 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -377,12 +356,7 @@ benchmarks_instance_pallet! 
{ members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -443,7 +417,7 @@ benchmarks_instance_pallet! { index, true, )?; - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -468,12 +442,7 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -520,7 +489,7 @@ benchmarks_instance_pallet! { System::::set_block_number(T::BlockNumber::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -544,12 +513,7 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members( - SystemOrigin::Root.into(), - founders, - fellows, - vec![], - )?; + Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -598,19 +562,19 @@ benchmarks_instance_pallet! 
{ // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(T::BlockNumber::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); } init_members { - // at least 1 founders - let x in 1 .. T::MaxFounders::get(); + // at least 2 founders + let x in 2 .. T::MaxFounders::get(); let y in 0 .. T::MaxFellows::get(); let z in 0 .. T::MaxAllies::get(); - let mut founders = (0 .. x).map(founder::).collect::>(); + let mut founders = (2 .. x).map(founder::).collect::>(); let mut fellows = (0 .. y).map(fellow::).collect::>(); let mut allies = (0 .. z).map(ally::).collect::>(); @@ -629,47 +593,6 @@ benchmarks_instance_pallet! { assert_eq!(Alliance::::members(MemberRole::Ally), allies); } - disband { - // at least 1 founders - let x in 1 .. T::MaxFounders::get() + T::MaxFellows::get(); - let y in 0 .. T::MaxAllies::get(); - let z in 0 .. T::MaxMembersCount::get() / 2; - - let voting_members = (0 .. x).map(founder::).collect::>(); - let allies = (0 .. 
y).map(ally::).collect::>(); - let witness = DisbandWitness{ - voting_members: x, - ally_members: y, - }; - - // setting the Alliance to disband on the benchmark call - Alliance::::init_members( - SystemOrigin::Root.into(), - voting_members.clone(), - vec![], - allies.clone(), - )?; - - // reserve deposits - let deposit = T::AllyDeposit::get(); - for member in voting_members.iter().chain(allies.iter()).take(z as usize) { - T::Currency::reserve(&member, deposit)?; - >::insert(&member, deposit); - } - - assert_eq!(Alliance::::voting_members_count(), x); - assert_eq!(Alliance::::ally_members_count(), y); - }: _(SystemOrigin::Root, witness) - verify { - assert_last_event::(Event::AllianceDisbanded { - voting_members: x, - ally_members: y, - unreserved: cmp::min(z, x + y), - }.into()); - - assert!(!Alliance::::is_initialized()); - } - set_rule { set_members::(); @@ -739,7 +662,7 @@ benchmarks_instance_pallet! { assert!(!Alliance::::is_member(&outsider)); assert_eq!(DepositOf::::get(&outsider), None); - let outsider_lookup = T::Lookup::unlookup(outsider.clone()); + let outsider_lookup: ::Source = T::Lookup::unlookup(outsider.clone()); }: _(SystemOrigin::Signed(founder1.clone()), outsider_lookup) verify { assert!(Alliance::::is_member_of(&outsider, MemberRole::Ally)); // outsider is now an ally @@ -758,7 +681,7 @@ benchmarks_instance_pallet! { let ally1 = ally::(1); assert!(Alliance::::is_ally(&ally1)); - let ally1_lookup = T::Lookup::unlookup(ally1.clone()); + let ally1_lookup: ::Source = T::Lookup::unlookup(ally1.clone()); let call = Call::::elevate_ally { ally: ally1_lookup }; let origin = T::MembershipManager::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } @@ -768,37 +691,12 @@ benchmarks_instance_pallet! 
{ assert_last_event::(Event::AllyElevated { ally: ally1 }.into()); } - give_retirement_notice { - set_members::(); - let fellow2 = fellow::(2); - - assert!(Alliance::::is_fellow(&fellow2)); - }: _(SystemOrigin::Signed(fellow2.clone())) - verify { - assert!(Alliance::::is_member_of(&fellow2, MemberRole::Retiring)); - - assert_eq!( - RetiringMembers::::get(&fellow2), - Some(System::::block_number() + T::RetirementPeriod::get()) - ); - assert_last_event::( - Event::MemberRetirementPeriodStarted {member: fellow2}.into() - ); - } - retire { set_members::(); let fellow2 = fellow::(2); assert!(Alliance::::is_fellow(&fellow2)); - - assert_eq!( - Alliance::::give_retirement_notice( - SystemOrigin::Signed(fellow2.clone()).into() - ), - Ok(()) - ); - System::::set_block_number(System::::block_number() + T::RetirementPeriod::get()); + assert!(!Alliance::::is_up_for_kicking(&fellow2)); assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); }: _(SystemOrigin::Signed(fellow2.clone())) @@ -815,10 +713,14 @@ benchmarks_instance_pallet! { set_members::(); let fellow2 = fellow::(2); + UpForKicking::::insert(&fellow2, true); + assert!(Alliance::::is_member_of(&fellow2, MemberRole::Fellow)); + assert!(Alliance::::is_up_for_kicking(&fellow2)); + assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); - let fellow2_lookup = T::Lookup::unlookup(fellow2.clone()); + let fellow2_lookup: ::Source = T::Lookup::unlookup(fellow2.clone()); let call = Call::::kick_member { who: fellow2_lookup }; let origin = T::MembershipManager::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } @@ -832,8 +734,8 @@ benchmarks_instance_pallet! { } add_unscrupulous_items { - let n in 0 .. T::MaxUnscrupulousItems::get(); - let l in 0 .. T::MaxWebsiteUrlLength::get(); + let n in 1 .. T::MaxUnscrupulousItems::get(); + let l in 1 .. T::MaxWebsiteUrlLength::get(); set_members::(); @@ -856,8 +758,8 @@ benchmarks_instance_pallet! { } remove_unscrupulous_items { - let n in 0 .. 
T::MaxUnscrupulousItems::get(); - let l in 0 .. T::MaxWebsiteUrlLength::get(); + let n in 1 .. T::MaxUnscrupulousItems::get(); + let l in 1 .. T::MaxWebsiteUrlLength::get(); set_members::(); diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index fca17e69c7652..111ea5dc6e507 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -60,8 +60,6 @@ //! //! #### For Members (All) //! -//! - `give_retirement_notice` - Give a retirement notice and start a retirement period required to -//! pass in order to retire. //! - `retire` - Retire from the Alliance and release the caller's deposit. //! //! #### For Members (Founders/Fellows) @@ -84,8 +82,7 @@ //! //! #### Root Calls //! -//! - `init_members` - Initialize the Alliance, onboard founders, fellows, and allies. -//! - `disband` - Disband the Alliance, remove all active members and unreserve deposits. +//! - `init_founders` - Initialize the founding members. #![cfg_attr(not(feature = "std"), no_std)] @@ -96,14 +93,13 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod migration; mod types; pub mod weights; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use sp_runtime::{ - traits::{Saturating, StaticLookup, Zero}, + traits::{StaticLookup, Zero}, RuntimeDebug, }; use sp_std::{convert::TryInto, prelude::*}; @@ -120,7 +116,7 @@ use frame_support::{ ChangeMembers, Currency, Get, InitializeMembers, IsSubType, OnUnbalanced, ReservableCurrency, }, - weights::{OldWeight, Weight}, + weights::Weight, }; use pallet_identity::IdentityField; @@ -128,9 +124,6 @@ pub use pallet::*; pub use types::*; pub use weights::*; -/// The log target of this pallet. -pub const LOG_TARGET: &str = "runtime::alliance"; - /// Simple index type for proposal counting. pub type ProposalIndex = u32; @@ -173,8 +166,6 @@ impl IdentityVerifier for () { /// The provider of a collective action interface, for example an instance of `pallet-collective`. 
pub trait ProposalProvider { - /// Add a new proposal. - /// Returns a proposal length and active proposals count if successful. fn propose_proposal( who: AccountId, threshold: u32, @@ -182,8 +173,6 @@ pub trait ProposalProvider { length_bound: u32, ) -> Result<(u32, u32), DispatchError>; - /// Add an aye or nay vote for the sender to the given proposal. - /// Returns true if the sender votes first time if successful. fn vote_proposal( who: AccountId, proposal: Hash, @@ -191,11 +180,8 @@ pub trait ProposalProvider { approve: bool, ) -> Result; - /// Veto a proposal, closing and removing it from the system, regardless of its current state. - /// Returns an active proposals count, which includes removed proposal. fn veto_proposal(proposal_hash: Hash) -> u32; - /// Close a proposal that is either approved, disapproved, or whose voting period has ended. fn close_proposal( proposal_hash: Hash, index: ProposalIndex, @@ -203,7 +189,6 @@ pub trait ProposalProvider { length_bound: u32, ) -> DispatchResultWithPostInfo; - /// Return a proposal of the given hash. fn proposal_of(proposal_hash: Hash) -> Option; } @@ -213,7 +198,6 @@ pub enum MemberRole { Founder, Fellow, Ally, - Retiring, } /// The type of item that may be deemed unscrupulous. @@ -226,40 +210,36 @@ pub enum UnscrupulousItem { type UnscrupulousItemOf = UnscrupulousItem<::AccountId, UrlOf>; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] - #[pallet::storage_version(migration::STORAGE_VERSION)] pub struct Pallet(PhantomData<(T, I)>); #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; - /// The runtime call dispatch type. + /// The outer call dispatch type. 
type Proposal: Parameter - + Dispatchable + + Dispatchable + From> + From> + GetDispatchInfo + IsSubType> - + IsType<::RuntimeCall>; + + IsType<::Call>; /// Origin for admin-level operations, like setting the Alliance's rules. - type AdminOrigin: EnsureOrigin; + type AdminOrigin: EnsureOrigin; /// Origin that manages entry and forcible discharge from the Alliance. - type MembershipManager: EnsureOrigin; + type MembershipManager: EnsureOrigin; /// Origin for making announcements and adding/removing unscrupulous items. - type AnnouncementOrigin: EnsureOrigin; + type AnnouncementOrigin: EnsureOrigin; /// The currency used for deposits. type Currency: ReservableCurrency; @@ -326,18 +306,14 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - - /// The number of blocks a member must wait between giving a retirement notice and retiring. - /// Supposed to be greater than time required to `kick_member`. - type RetirementPeriod: Get; } #[pallet::error] pub enum Error { + /// The founders/fellows/allies have already been initialized. + AllianceAlreadyInitialized, /// The Alliance has not been initialized yet, therefore accounts cannot join it. AllianceNotYetInitialized, - /// The Alliance has been initialized, therefore cannot be initialized again. - AllianceAlreadyInitialized, /// Account is already a member. AlreadyMember, /// Account is not a member. @@ -346,6 +322,8 @@ pub mod pallet { NotAlly, /// Account is not a founder. NotFounder, + /// This member is up for being kicked from the Alliance and cannot perform this operation. + UpForKicking, /// Account does not have voting rights. NoVotingRights, /// Account is already an elevated (fellow) member. @@ -377,16 +355,6 @@ pub mod pallet { TooManyMembers, /// Number of announcements exceeds `MaxAnnouncementsCount`. TooManyAnnouncements, - /// Invalid witness data given. 
- BadWitness, - /// Account already gave retirement notice - AlreadyRetiring, - /// Account did not give a retirement notice required to retire. - RetirementNoticeNotGiven, - /// Retirement period has not passed. - RetirementPeriodNotPassed, - /// Founders must be provided to initialize the Alliance. - FoundersMissing, } #[pallet::event] @@ -412,8 +380,6 @@ pub mod pallet { }, /// An ally has been elevated to Fellow. AllyElevated { ally: T::AccountId }, - /// A member gave retirement notice and their retirement period started. - MemberRetirementPeriodStarted { member: T::AccountId }, /// A member has retired with its deposit unreserved. MemberRetired { member: T::AccountId, unreserved: Option> }, /// A member has been kicked out with its deposit slashed. @@ -422,8 +388,6 @@ pub mod pallet { UnscrupulousItemAdded { items: Vec> }, /// Accounts or websites have been removed from the list of unscrupulous items. UnscrupulousItemRemoved { items: Vec> }, - /// Alliance disbanded. Includes number deleted members and unreserved deposits. - AllianceDisbanded { voting_members: u32, ally_members: u32, unreserved: u32 }, } #[pallet::genesis_config] @@ -467,22 +431,15 @@ pub mod pallet { !Pallet::::has_member(MemberRole::Fellow), "Fellows are already initialized!" ); - assert!( - !self.founders.is_empty(), - "Founders must be provided to initialize the Alliance" - ); let members: BoundedVec = self.fellows.clone().try_into().expect("Too many genesis fellows"); Members::::insert(MemberRole::Fellow, members); } if !self.allies.is_empty() { + // Only allow Allies if the Alliance is "initialized". assert!( - !Pallet::::has_member(MemberRole::Ally), - "Allies are already initialized!" 
- ); - assert!( - !self.founders.is_empty(), - "Founders must be provided to initialize the Alliance" + Pallet::::is_initialized(), + "Alliance must have Founders or Fellows to have Allies" ); let members: BoundedVec = self.allies.clone().try_into().expect("Too many genesis allies"); @@ -526,12 +483,12 @@ pub mod pallet { ValueQuery, >; - /// A set of members who gave a retirement notice. They can retire after the end of retirement - /// period stored as a future block number. + /// A set of members that are (potentially) being kicked out. They cannot retire until the + /// motion is settled. #[pallet::storage] - #[pallet::getter(fn retiring_members)] - pub type RetiringMembers, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, T::AccountId, T::BlockNumber, OptionQuery>; + #[pallet::getter(fn up_for_kicking)] + pub type UpForKicking, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, bool, ValueQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. 
@@ -566,6 +523,11 @@ pub mod pallet { let proposor = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&proposor), Error::::NoVotingRights); + if let Some(Call::kick_member { who }) = proposal.is_sub_type() { + let strike = T::Lookup::lookup(who.clone())?; + >::insert(strike, true); + } + T::ProposalProvider::propose_proposal(proposor, threshold, proposal, length_bound)?; Ok(()) } @@ -620,33 +582,34 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(x, y, p2)) .max(T::WeightInfo::close_approved(b, x, y, p2)) .max(T::WeightInfo::close_disapproved(x, y, p2)) - .saturating_add(p1.into()) + .saturating_add(p1) })] - #[allow(deprecated)] - #[deprecated(note = "1D weight is used in this extrinsic, please migrate to use `close`")] - pub fn close_old_weight( + pub fn close( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: OldWeight, + #[pallet::compact] proposal_weight_bound: Weight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { - let proposal_weight_bound: Weight = proposal_weight_bound.into(); let who = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); - Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + let info = T::ProposalProvider::close_proposal( + proposal_hash, + index, + proposal_weight_bound, + length_bound, + )?; + Ok(info.into()) } - /// Initialize the Alliance, onboard founders, fellows, and allies. + /// Initialize the founders, fellows, and allies. /// - /// Founders must be not empty. - /// The Alliance must be empty. - /// Must be called by the Root origin. + /// This should only be called once, and must be called by the Root origin. 
#[pallet::weight(T::WeightInfo::init_members( - founders.len() as u32, - fellows.len() as u32, - allies.len() as u32, + T::MaxFounders::get(), + T::MaxFellows::get(), + T::MaxAllies::get() ))] pub fn init_members( origin: OriginFor, @@ -656,7 +619,9 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - ensure!(!founders.is_empty(), Error::::FoundersMissing); + // Cannot be called if the Alliance already has Founders or Fellows. + // TODO: Remove check and allow Root to set members at any time. + // https://github.com/paritytech/substrate/issues/11928 ensure!(!Self::is_initialized(), Error::::AllianceAlreadyInitialized); let mut founders: BoundedVec = @@ -685,11 +650,9 @@ pub mod pallet { T::InitializeMembers::initialize_members(&voteable_members); log::debug!( - target: LOG_TARGET, + target: "runtime::alliance", "Initialize alliance founders: {:?}, fellows: {:?}, allies: {:?}", - founders, - fellows, - allies + founders, fellows, allies ); Self::deposit_event(Event::MembersInitialized { @@ -700,59 +663,6 @@ pub mod pallet { Ok(()) } - /// Disband the Alliance, remove all active members and unreserve deposits. - /// - /// Witness data must be set. 
- #[pallet::weight(T::WeightInfo::disband( - witness.voting_members, - witness.ally_members, - witness.voting_members.saturating_add(witness.ally_members), - ))] - pub fn disband( - origin: OriginFor, - witness: DisbandWitness, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - - ensure!(!witness.is_zero(), Error::::BadWitness); - ensure!( - Self::voting_members_count() <= witness.voting_members, - Error::::BadWitness - ); - ensure!(Self::ally_members_count() <= witness.ally_members, Error::::BadWitness); - ensure!(Self::is_initialized(), Error::::AllianceNotYetInitialized); - - let voting_members = Self::voting_members(); - T::MembershipChanged::change_members_sorted(&[], &voting_members, &[]); - - let ally_members = Self::members_of(MemberRole::Ally); - let mut unreserve_count: u32 = 0; - for member in voting_members.iter().chain(ally_members.iter()) { - if let Some(deposit) = DepositOf::::take(&member) { - let err_amount = T::Currency::unreserve(&member, deposit); - debug_assert!(err_amount.is_zero()); - unreserve_count += 1; - } - } - - Members::::remove(&MemberRole::Founder); - Members::::remove(&MemberRole::Fellow); - Members::::remove(&MemberRole::Ally); - - Self::deposit_event(Event::AllianceDisbanded { - voting_members: voting_members.len() as u32, - ally_members: ally_members.len() as u32, - unreserved: unreserve_count, - }); - - Ok(Some(T::WeightInfo::disband( - voting_members.len() as u32, - ally_members.len() as u32, - unreserve_count, - )) - .into()) - } - /// Set a new IPFS CID to the alliance rule. #[pallet::weight(T::WeightInfo::set_rule())] pub fn set_rule(origin: OriginFor, rule: Cid) -> DispatchResult { @@ -834,7 +744,10 @@ pub mod pallet { /// A founder or fellow can nominate someone to join the alliance as an Ally. /// There is no deposit required to the nominator or nominee. 
#[pallet::weight(T::WeightInfo::nominate_ally())] - pub fn nominate_ally(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn nominate_ally( + origin: OriginFor, + who: ::Source, + ) -> DispatchResult { let nominator = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&nominator), Error::::NoVotingRights); let who = T::Lookup::lookup(who)?; @@ -858,7 +771,10 @@ pub mod pallet { /// Elevate an ally to fellow. #[pallet::weight(T::WeightInfo::elevate_ally())] - pub fn elevate_ally(origin: OriginFor, ally: AccountIdLookupOf) -> DispatchResult { + pub fn elevate_ally( + origin: OriginFor, + ally: ::Source, + ) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let ally = T::Lookup::lookup(ally)?; ensure!(Self::is_ally(&ally), Error::::NotAlly); @@ -871,40 +787,15 @@ pub mod pallet { Ok(()) } - /// As a member, give a retirement notice and start a retirement period required to pass in - /// order to retire. - #[pallet::weight(T::WeightInfo::give_retirement_notice())] - pub fn give_retirement_notice(origin: OriginFor) -> DispatchResult { - let who = ensure_signed(origin)?; - let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; - ensure!(role.ne(&MemberRole::Retiring), Error::::AlreadyRetiring); - - Self::remove_member(&who, role)?; - Self::add_member(&who, MemberRole::Retiring)?; - >::insert( - &who, - frame_system::Pallet::::block_number() - .saturating_add(T::RetirementPeriod::get()), - ); - - Self::deposit_event(Event::MemberRetirementPeriodStarted { member: who }); - Ok(()) - } - /// As a member, retire from the alliance and unreserve the deposit. - /// This can only be done once you have `give_retirement_notice` and it has expired. 
#[pallet::weight(T::WeightInfo::retire())] pub fn retire(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - let retirement_period_end = RetiringMembers::::get(&who) - .ok_or(Error::::RetirementNoticeNotGiven)?; - ensure!( - frame_system::Pallet::::block_number() >= retirement_period_end, - Error::::RetirementPeriodNotPassed - ); + // A member up for kicking cannot retire. + ensure!(!Self::is_up_for_kicking(&who), Error::::UpForKicking); - Self::remove_member(&who, MemberRole::Retiring)?; - >::remove(&who); + let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; + Self::remove_member(&who, role)?; let deposit = DepositOf::::take(&who); if let Some(deposit) = deposit { let err_amount = T::Currency::unreserve(&who, deposit); @@ -916,7 +807,10 @@ pub mod pallet { /// Kick a member from the alliance and slash its deposit. #[pallet::weight(T::WeightInfo::kick_member())] - pub fn kick_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn kick_member( + origin: OriginFor, + who: ::Source, + ) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let member = T::Lookup::lookup(who)?; @@ -927,6 +821,8 @@ pub mod pallet { T::Slashed::on_unbalanced(T::Currency::slash_reserved(&member, deposit).0); } + >::remove(&member); + Self::deposit_event(Event::MemberKicked { member, slashed: deposit }); Ok(()) } @@ -982,43 +878,13 @@ pub mod pallet { Self::deposit_event(Event::UnscrupulousItemRemoved { items }); Ok(()) } - - /// Close a vote that is either approved, disapproved, or whose voting period has ended. - /// - /// Requires the sender to be a founder or fellow. 
- #[pallet::weight({ - let b = *length_bound; - let x = T::MaxFounders::get(); - let y = T::MaxFellows::get(); - let p1 = *proposal_weight_bound; - let p2 = T::MaxProposals::get(); - T::WeightInfo::close_early_approved(b, x, y, p2) - .max(T::WeightInfo::close_early_disapproved(x, y, p2)) - .max(T::WeightInfo::close_approved(b, x, y, p2)) - .max(T::WeightInfo::close_disapproved(x, y, p2)) - .saturating_add(p1) - })] - pub fn close( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] index: ProposalIndex, - proposal_weight_bound: Weight, - #[pallet::compact] length_bound: u32, - ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); - - Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) - } } } impl, I: 'static> Pallet { /// Check if the Alliance has been initialized. fn is_initialized() -> bool { - Self::has_member(MemberRole::Founder) || - Self::has_member(MemberRole::Fellow) || - Self::has_member(MemberRole::Ally) + Self::has_member(MemberRole::Founder) || Self::has_member(MemberRole::Fellow) } /// Check if a given role has any members. @@ -1062,36 +928,18 @@ impl, I: 'static> Pallet { Self::is_founder(who) || Self::is_fellow(who) } - /// Count of ally members. - fn ally_members_count() -> u32 { - Members::::decode_len(MemberRole::Ally).unwrap_or(0) as u32 - } - - /// Count of all members who have voting rights. - fn voting_members_count() -> u32 { - Members::::decode_len(MemberRole::Founder) - .unwrap_or(0) - .saturating_add(Members::::decode_len(MemberRole::Fellow).unwrap_or(0)) as u32 - } - - /// Get all members of a given role. - fn members_of(role: MemberRole) -> Vec { - Members::::get(role).into_inner() - } - /// Collect all members who have voting rights into one list. 
- fn voting_members() -> Vec { - let mut founders = Self::members_of(MemberRole::Founder); - let mut fellows = Self::members_of(MemberRole::Fellow); + fn votable_members_sorted() -> Vec { + let mut founders = Members::::get(MemberRole::Founder).into_inner(); + let mut fellows = Members::::get(MemberRole::Fellow).into_inner(); founders.append(&mut fellows); - founders + founders.sort(); + founders.into() } - /// Collect all members who have voting rights into one sorted list. - fn voting_members_sorted() -> Vec { - let mut members = Self::voting_members(); - members.sort(); - members + /// Check if an account's forced removal is up for consideration. + fn is_up_for_kicking(who: &T::AccountId) -> bool { + >::contains_key(&who) } /// Add a user to the sorted alliance member set. @@ -1105,7 +953,7 @@ impl, I: 'static> Pallet { })?; if role == MemberRole::Founder || role == MemberRole::Fellow { - let members = Self::voting_members_sorted(); + let members = Self::votable_members_sorted(); T::MembershipChanged::change_members_sorted(&[who.clone()], &[], &members[..]); } Ok(()) @@ -1120,7 +968,7 @@ impl, I: 'static> Pallet { })?; if matches!(role, MemberRole::Founder | MemberRole::Fellow) { - let members = Self::voting_members_sorted(); + let members = Self::votable_members_sorted(); T::MembershipChanged::change_members_sorted(&[], &[who.clone()], &members[..]); } Ok(()) @@ -1222,19 +1070,4 @@ impl, I: 'static> Pallet { } res } - - fn do_close( - proposal_hash: T::Hash, - index: ProposalIndex, - proposal_weight_bound: Weight, - length_bound: u32, - ) -> DispatchResultWithPostInfo { - let info = T::ProposalProvider::close_proposal( - proposal_hash, - index, - proposal_weight_bound, - length_bound, - )?; - Ok(info.into()) - } } diff --git a/frame/alliance/src/migration.rs b/frame/alliance/src/migration.rs deleted file mode 100644 index 8f98484240061..0000000000000 --- a/frame/alliance/src/migration.rs +++ /dev/null @@ -1,79 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{Config, Pallet, Weight, LOG_TARGET}; -use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; -use log; - -/// The current storage version. -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - -/// Wrapper for all migrations of this pallet. -pub fn migrate, I: 'static>() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - let mut weight: Weight = Weight::zero(); - - if onchain_version < 1 { - weight = weight.saturating_add(v0_to_v1::migrate::()); - } - - STORAGE_VERSION.put::>(); - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - - weight -} - -/// Implements `OnRuntimeUpgrade` trait. -pub struct Migration(PhantomData<(T, I)>); - -impl, I: 'static> OnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - migrate::() - } -} - -/// v0_to_v1: `UpForKicking` is replaced by a retirement period. 
-mod v0_to_v1 { - use super::*; - - pub fn migrate, I: 'static>() -> Weight { - log::info!(target: LOG_TARGET, "Running migration v0_to_v1."); - - let res = migration::clear_storage_prefix( - >::name().as_bytes(), - b"UpForKicking", - b"", - None, - None, - ); - - log::info!( - target: LOG_TARGET, - "Cleared '{}' entries from 'UpForKicking' storage prefix", - res.unique - ); - - if res.maybe_cursor.is_some() { - log::error!( - target: LOG_TARGET, - "Storage prefix 'UpForKicking' is not completely cleared." - ); - } - - T::DbWeight::get().writes(res.unique.into()) - } -} diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index 196e0003b537f..91986300aa2e1 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -18,7 +18,6 @@ //! Test utilities pub use sp_core::H256; -use sp_runtime::traits::Hash; pub use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, @@ -38,25 +37,23 @@ pub use crate as pallet_alliance; use super::*; -type BlockNumber = u64; - parameter_types! { - pub const BlockHashCount: BlockNumber = 250; + pub const BlockHashCount: u64 = 250; } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; - type BlockNumber = BlockNumber; + type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); @@ -77,7 +74,7 @@ parameter_types! 
{ impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -86,18 +83,16 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; } -const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; - parameter_types! { - pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; + pub const MotionDuration: u64 = 3; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; } type AllianceCollective = pallet_collective::Instance1; impl pallet_collective::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = MotionDuration; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -124,7 +119,7 @@ type EnsureOneOrRoot = EitherOfDiverse, EnsureSignedBy type EnsureTwoOrRoot = EitherOfDiverse, EnsureSignedBy>; impl pallet_identity::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; type FieldDeposit = FieldDeposit; @@ -162,11 +157,11 @@ impl IdentityVerifier for AllianceIdentityVerifier { } pub struct AllianceProposalProvider; -impl ProposalProvider for AllianceProposalProvider { +impl ProposalProvider for AllianceProposalProvider { fn propose_proposal( who: u64, threshold: u32, - proposal: Box, + proposal: Box, length_bound: u32, ) -> Result<(u32, u32), DispatchError> { AllianceMotion::do_propose_proposed(who, threshold, proposal, length_bound) @@ -194,7 +189,7 @@ impl ProposalProvider for AllianceProposalProvider { AllianceMotion::do_close(proposal_hash, proposal_index, proposal_weight_bound, length_bound) } - fn proposal_of(proposal_hash: H256) -> Option { + fn proposal_of(proposal_hash: H256) -> Option { 
AllianceMotion::proposal_of(proposal_hash) } } @@ -204,11 +199,10 @@ parameter_types! { pub const MaxFellows: u32 = MaxMembers::get() - MaxFounders::get(); pub const MaxAllies: u32 = 100; pub const AllyDeposit: u64 = 25; - pub const RetirementPeriod: BlockNumber = MOTION_DURATION_IN_BLOCKS + 1; } impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Proposal = RuntimeCall; + type Event = Event; + type Proposal = Call; type AdminOrigin = EnsureSignedBy; type MembershipManager = EnsureSignedBy; type AnnouncementOrigin = EnsureSignedBy; @@ -231,7 +225,6 @@ impl Config for Test { type MaxMembersCount = MaxMembers; type AllyDeposit = AllyDeposit; type WeightInfo = (); - type RetirementPeriod = RetirementPeriod; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -255,17 +248,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 50), - (2, 50), - (3, 50), - (4, 50), - (5, 30), - (6, 50), - (7, 50), - (8, 50), - (9, 50), - ], + balances: vec![(1, 50), (2, 50), (3, 50), (4, 50), (5, 30), (6, 50), (7, 50)], } .assimilate_storage(&mut t) .unwrap(); @@ -283,7 +266,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 1)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 1)); let info = IdentityInfo { additional: BoundedVec::default(), @@ -296,71 +279,25 @@ pub fn new_test_ext() -> sp_io::TestExternalities { image: Data::default(), twitter: Data::default(), }; - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(1), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 1, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(2), 
Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 2, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(3), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 3, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(4), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 4, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(5), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 5, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(6), Box::new(info.clone()))); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(8), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 8, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(9), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(1), - 0, - 9, - Judgement::KnownGood, - BlakeTwo256::hash_of(&info) - )); + assert_ok!(Identity::set_identity(Origin::signed(1), Box::new(info.clone()))); + assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 1, Judgement::KnownGood)); + assert_ok!(Identity::set_identity(Origin::signed(2), Box::new(info.clone()))); + assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 2, Judgement::KnownGood)); + assert_ok!(Identity::set_identity(Origin::signed(3), Box::new(info.clone()))); + assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 3, Judgement::KnownGood)); + assert_ok!(Identity::set_identity(Origin::signed(4), Box::new(info.clone()))); + 
assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 4, Judgement::KnownGood)); + assert_ok!(Identity::set_identity(Origin::signed(5), Box::new(info.clone()))); + assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 5, Judgement::KnownGood)); + assert_ok!(Identity::set_identity(Origin::signed(6), Box::new(info.clone()))); // Joining before init should fail. assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(1)), + Alliance::join_alliance(Origin::signed(1)), Error::::AllianceNotYetInitialized ); - assert_ok!(Alliance::init_members(RuntimeOrigin::root(), vec![1, 2], vec![3], vec![])); + assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![])); System::set_block_number(1); }); @@ -380,20 +317,14 @@ pub fn test_cid() -> Cid { Cid::new_v0(&*result) } -pub fn make_remark_proposal(value: u64) -> (RuntimeCall, u32, H256) { - make_proposal(RuntimeCall::System(frame_system::Call::remark { remark: value.encode() })) -} - -pub fn make_set_rule_proposal(rule: Cid) -> (RuntimeCall, u32, H256) { - make_proposal(RuntimeCall::Alliance(pallet_alliance::Call::set_rule { rule })) +pub fn make_proposal(value: u64) -> Call { + Call::System(frame_system::Call::remark { remark: value.encode() }) } -pub fn make_kick_member_proposal(who: u64) -> (RuntimeCall, u32, H256) { - make_proposal(RuntimeCall::Alliance(pallet_alliance::Call::kick_member { who })) +pub fn make_set_rule_proposal(rule: Cid) -> Call { + Call::Alliance(pallet_alliance::Call::set_rule { rule }) } -pub fn make_proposal(proposal: RuntimeCall) -> (RuntimeCall, u32, H256) { - let len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - (proposal, len, hash) +pub fn make_kick_member_proposal(who: u64) -> Call { + Call::Alliance(pallet_alliance::Call::kick_member { who }) } diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index c55826768c695..85c91b451d351 100644 --- a/frame/alliance/src/tests.rs +++ 
b/frame/alliance/src/tests.rs @@ -17,7 +17,9 @@ //! Tests for the alliance pallet. -use frame_support::{assert_noop, assert_ok, error::BadOrigin}; +use sp_runtime::traits::Hash; + +use frame_support::{assert_noop, assert_ok, Hashable}; use frame_system::{EventRecord, Phase}; use super::*; @@ -25,133 +27,21 @@ use crate::mock::*; type AllianceMotionEvent = pallet_collective::Event; -#[test] -fn init_members_works() { - new_test_ext().execute_with(|| { - // alliance must be reset first, no witness data - assert_noop!( - Alliance::init_members(RuntimeOrigin::root(), vec![8], vec![], vec![],), - Error::::AllianceAlreadyInitialized, - ); - - // give a retirement notice to check later a retiring member not removed - assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(2))); - assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); - - // disband the Alliance to init new - assert_ok!(Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(2, 0))); - - // fails without root - assert_noop!( - Alliance::init_members(RuntimeOrigin::signed(1), vec![], vec![], vec![]), - BadOrigin - ); - - // founders missing, other members given - assert_noop!( - Alliance::init_members(RuntimeOrigin::root(), vec![], vec![4], vec![2],), - Error::::FoundersMissing, - ); - - // success call - assert_ok!(Alliance::init_members(RuntimeOrigin::root(), vec![8, 5], vec![4], vec![2],)); - - // assert new set of voting members - assert_eq!(Alliance::voting_members_sorted(), vec![4, 5, 8]); - // assert new members member - assert!(Alliance::is_founder(&8)); - assert!(Alliance::is_founder(&5)); - assert!(Alliance::is_fellow(&4)); - assert!(Alliance::is_ally(&2)); - // assert a retiring member from previous Alliance not removed - assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); - - System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MembersInitialized { - founders: vec![5, 8], - fellows: vec![4], - allies: vec![2], - })); - }) -} - -#[test] -fn 
disband_works() { - new_test_ext().execute_with(|| { - // ensure alliance is set - assert_eq!(Alliance::voting_members_sorted(), vec![1, 2, 3]); - - // give a retirement notice to check later a retiring member not removed - assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(2))); - assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); - - // join alliance and reserve funds - assert_eq!(Balances::free_balance(9), 40); - assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(9))); - assert_eq!(Alliance::deposit_of(9), Some(25)); - assert_eq!(Balances::free_balance(9), 15); - assert!(Alliance::is_member_of(&9, MemberRole::Ally)); - - // fails without root - assert_noop!(Alliance::disband(RuntimeOrigin::signed(1), Default::default()), BadOrigin); - - // bad witness data checks - assert_noop!( - Alliance::disband(RuntimeOrigin::root(), Default::default(),), - Error::::BadWitness - ); - - assert_noop!( - Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(1, 1)), - Error::::BadWitness, - ); - assert_noop!( - Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(2, 0)), - Error::::BadWitness, - ); - - // success call - assert_ok!(Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(2, 1))); - - // assert members disband - assert!(!Alliance::is_member(&1)); - assert!(!Alliance::is_initialized()); - // assert a retiring member from the previous Alliance not removed - assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); - // deposit unreserved - assert_eq!(Balances::free_balance(9), 40); - - System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::AllianceDisbanded { - voting_members: 2, - ally_members: 1, - unreserved: 1, - })); - - // the Alliance must be set first - assert_noop!( - Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(100, 100)), - Error::::AllianceNotYetInitialized, - ); - }) -} - #[test] fn propose_works() { new_test_ext().execute_with(|| { - let (proposal, proposal_len, hash) = 
make_remark_proposal(42); + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash: H256 = proposal.blake2_256().into(); - // only voting member can propose proposal, 4 is ally not have vote rights + // only votable member can propose proposal, 4 is ally not have vote rights assert_noop!( - Alliance::propose( - RuntimeOrigin::signed(4), - 3, - Box::new(proposal.clone()), - proposal_len - ), + Alliance::propose(Origin::signed(4), 3, Box::new(proposal.clone()), proposal_len), Error::::NoVotingRights ); assert_ok!(Alliance::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -162,7 +52,7 @@ fn propose_works() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { + event: mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, @@ -177,28 +67,30 @@ fn propose_works() { #[test] fn vote_works() { new_test_ext().execute_with(|| { - let (proposal, proposal_len, hash) = make_remark_proposal(42); + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash: H256 = proposal.blake2_256().into(); assert_ok!(Alliance::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Alliance::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Alliance::vote(Origin::signed(2), hash.clone(), 0, true)); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( System::events(), vec![ - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, - proposal_hash: hash, + proposal_hash: hash.clone(), threshold: 3 })), - 
record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 2, - proposal_hash: hash, + proposal_hash: hash.clone(), voted: true, yes: 1, no: 0, @@ -211,23 +103,27 @@ fn vote_works() { #[test] fn veto_works() { new_test_ext().execute_with(|| { - let (proposal, proposal_len, hash) = make_remark_proposal(42); + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash: H256 = proposal.blake2_256().into(); assert_ok!(Alliance::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); // only set_rule/elevate_ally can be veto assert_noop!( - Alliance::veto(RuntimeOrigin::signed(1), hash), + Alliance::veto(Origin::signed(1), hash.clone()), Error::::NotVetoableProposal ); let cid = test_cid(); - let (vetoable_proposal, vetoable_proposal_len, vetoable_hash) = make_set_rule_proposal(cid); + let vetoable_proposal = make_set_rule_proposal(cid); + let vetoable_proposal_len: u32 = vetoable_proposal.using_encoded(|p| p.len() as u32); + let vetoable_hash: H256 = vetoable_proposal.blake2_256().into(); assert_ok!(Alliance::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(vetoable_proposal.clone()), vetoable_proposal_len @@ -235,29 +131,29 @@ fn veto_works() { // only founder have veto rights, 3 is fellow assert_noop!( - Alliance::veto(RuntimeOrigin::signed(3), vetoable_hash), + Alliance::veto(Origin::signed(3), vetoable_hash.clone()), Error::::NotFounder ); - assert_ok!(Alliance::veto(RuntimeOrigin::signed(2), vetoable_hash)); + assert_ok!(Alliance::veto(Origin::signed(2), vetoable_hash.clone())); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( System::events(), vec![ - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 
1, proposal_index: 0, - proposal_hash: hash, + proposal_hash: hash.clone(), threshold: 3 })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 1, - proposal_hash: vetoable_hash, + proposal_hash: vetoable_hash.clone(), threshold: 3 })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Disapproved { - proposal_hash: vetoable_hash + record(mock::Event::AllianceMotion(AllianceMotionEvent::Disapproved { + proposal_hash: vetoable_hash.clone() })), ] ); @@ -267,20 +163,22 @@ fn veto_works() { #[test] fn close_works() { new_test_ext().execute_with(|| { - let (proposal, proposal_len, hash) = make_remark_proposal(42); + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Alliance::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Alliance::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Alliance::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Alliance::vote(RuntimeOrigin::signed(3), hash, 0, true)); + assert_ok!(Alliance::vote(Origin::signed(1), hash.clone(), 0, true)); + assert_ok!(Alliance::vote(Origin::signed(2), hash.clone(), 0, true)); + assert_ok!(Alliance::vote(Origin::signed(3), hash.clone(), 0, true)); assert_ok!(Alliance::close( - RuntimeOrigin::signed(1), - hash, + Origin::signed(1), + hash.clone(), 0, proposal_weight, proposal_len @@ -290,43 +188,43 @@ fn close_works() { assert_eq!( System::events(), vec![ - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, - proposal_hash: hash, + proposal_hash: hash.clone(), threshold: 3 })), - 
record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 1, - proposal_hash: hash, + proposal_hash: hash.clone(), voted: true, yes: 1, no: 0, })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 2, - proposal_hash: hash, + proposal_hash: hash.clone(), voted: true, yes: 2, no: 0, })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 3, - proposal_hash: hash, + proposal_hash: hash.clone(), voted: true, yes: 3, no: 0, })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Closed { - proposal_hash: hash, + record(mock::Event::AllianceMotion(AllianceMotionEvent::Closed { + proposal_hash: hash.clone(), yes: 3, no: 0, })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Approved { - proposal_hash: hash + record(mock::Event::AllianceMotion(AllianceMotionEvent::Approved { + proposal_hash: hash.clone() })), - record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Executed { - proposal_hash: hash, + record(mock::Event::AllianceMotion(AllianceMotionEvent::Executed { + proposal_hash: hash.clone(), result: Err(DispatchError::BadOrigin), })) ] @@ -338,12 +236,10 @@ fn close_works() { fn set_rule_works() { new_test_ext().execute_with(|| { let cid = test_cid(); - assert_ok!(Alliance::set_rule(RuntimeOrigin::signed(1), cid.clone())); + assert_ok!(Alliance::set_rule(Origin::signed(1), cid.clone())); assert_eq!(Alliance::rule(), Some(cid.clone())); - System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::NewRuleSet { - rule: cid, - })); + System::assert_last_event(mock::Event::Alliance(crate::Event::NewRuleSet { rule: cid })); }); } @@ -351,13 +247,10 @@ fn set_rule_works() { fn announce_works() { new_test_ext().execute_with(|| { let cid = 
test_cid(); - - assert_noop!(Alliance::announce(RuntimeOrigin::signed(2), cid.clone()), BadOrigin); - - assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); + assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); - System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { + System::assert_last_event(mock::Event::Alliance(crate::Event::Announced { announcement: cid, })); }); @@ -367,19 +260,19 @@ fn announce_works() { fn remove_announcement_works() { new_test_ext().execute_with(|| { let cid = test_cid(); - assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); + assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); - System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { + System::assert_last_event(mock::Event::Alliance(crate::Event::Announced { announcement: cid.clone(), })); System::set_block_number(2); - assert_ok!(Alliance::remove_announcement(RuntimeOrigin::signed(3), cid.clone())); + assert_ok!(Alliance::remove_announcement(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![]); - System::assert_last_event(mock::RuntimeEvent::Alliance( - crate::Event::AnnouncementRemoved { announcement: cid }, - )); + System::assert_last_event(mock::Event::Alliance(crate::Event::AnnouncementRemoved { + announcement: cid, + })); }); } @@ -387,52 +280,46 @@ fn remove_announcement_works() { fn join_alliance_works() { new_test_ext().execute_with(|| { // check already member - assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(1)), - Error::::AlreadyMember - ); + assert_noop!(Alliance::join_alliance(Origin::signed(1)), Error::::AlreadyMember); // check already listed as unscrupulous assert_ok!(Alliance::add_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); assert_noop!( - 
Alliance::join_alliance(RuntimeOrigin::signed(4)), + Alliance::join_alliance(Origin::signed(4)), Error::::AccountNonGrata ); assert_ok!(Alliance::remove_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); // check deposit funds assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(5)), + Alliance::join_alliance(Origin::signed(5)), Error::::InsufficientFunds ); // success to submit - assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); + assert_ok!(Alliance::join_alliance(Origin::signed(4))); assert_eq!(Alliance::deposit_of(4), Some(25)); assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); // check already member - assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(4)), - Error::::AlreadyMember - ); + assert_noop!(Alliance::join_alliance(Origin::signed(4)), Error::::AlreadyMember); // check missing identity judgement #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(6)), + Alliance::join_alliance(Origin::signed(6)), Error::::WithoutGoodIdentityJudgement ); // check missing identity info #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(7)), + Alliance::join_alliance(Origin::signed(7)), Error::::WithoutIdentityDisplayAndWebsite ); }); @@ -443,51 +330,51 @@ fn nominate_ally_works() { new_test_ext().execute_with(|| { // check already member assert_noop!( - Alliance::nominate_ally(RuntimeOrigin::signed(1), 2), + Alliance::nominate_ally(Origin::signed(1), 2), Error::::AlreadyMember ); - // only voting member(founder/fellow) have nominate right + // only votable member(founder/fellow) have nominate right assert_noop!( - Alliance::nominate_ally(RuntimeOrigin::signed(5), 4), + Alliance::nominate_ally(Origin::signed(5), 4), Error::::NoVotingRights ); // check already listed as unscrupulous assert_ok!(Alliance::add_unscrupulous_items( - RuntimeOrigin::signed(3), + 
Origin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); assert_noop!( - Alliance::nominate_ally(RuntimeOrigin::signed(1), 4), + Alliance::nominate_ally(Origin::signed(1), 4), Error::::AccountNonGrata ); assert_ok!(Alliance::remove_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); // success to nominate - assert_ok!(Alliance::nominate_ally(RuntimeOrigin::signed(1), 4)); + assert_ok!(Alliance::nominate_ally(Origin::signed(1), 4)); assert_eq!(Alliance::deposit_of(4), None); assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); // check already member assert_noop!( - Alliance::nominate_ally(RuntimeOrigin::signed(1), 4), + Alliance::nominate_ally(Origin::signed(1), 4), Error::::AlreadyMember ); // check missing identity judgement #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(6)), + Alliance::join_alliance(Origin::signed(6)), Error::::WithoutGoodIdentityJudgement ); // check missing identity info #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(RuntimeOrigin::signed(7)), + Alliance::join_alliance(Origin::signed(7)), Error::::WithoutIdentityDisplayAndWebsite ); }); @@ -496,137 +383,63 @@ fn nominate_ally_works() { #[test] fn elevate_ally_works() { new_test_ext().execute_with(|| { - assert_noop!( - Alliance::elevate_ally(RuntimeOrigin::signed(2), 4), - Error::::NotAlly - ); + assert_noop!(Alliance::elevate_ally(Origin::signed(2), 4), Error::::NotAlly); - assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); + assert_ok!(Alliance::join_alliance(Origin::signed(4))); assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); - assert_ok!(Alliance::elevate_ally(RuntimeOrigin::signed(2), 4)); + assert_ok!(Alliance::elevate_ally(Origin::signed(2), 4)); assert_eq!(Alliance::members(MemberRole::Ally), Vec::::new()); 
assert_eq!(Alliance::members(MemberRole::Fellow), vec![3, 4]); }); } -#[test] -fn give_retirement_notice_work() { - new_test_ext().execute_with(|| { - assert_noop!( - Alliance::give_retirement_notice(RuntimeOrigin::signed(4)), - Error::::NotMember - ); - - assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); - assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); - System::assert_last_event(mock::RuntimeEvent::Alliance( - crate::Event::MemberRetirementPeriodStarted { member: (3) }, - )); - - assert_noop!( - Alliance::give_retirement_notice(RuntimeOrigin::signed(3)), - Error::::AlreadyRetiring - ); - }); -} - #[test] fn retire_works() { new_test_ext().execute_with(|| { - assert_noop!( - Alliance::retire(RuntimeOrigin::signed(2)), - Error::::RetirementNoticeNotGiven - ); + let proposal = make_kick_member_proposal(2); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + assert_ok!(Alliance::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_noop!(Alliance::retire(Origin::signed(2)), Error::::UpForKicking); - assert_noop!( - Alliance::retire(RuntimeOrigin::signed(4)), - Error::::RetirementNoticeNotGiven - ); + assert_noop!(Alliance::retire(Origin::signed(4)), Error::::NotMember); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); - assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); - assert_noop!( - Alliance::retire(RuntimeOrigin::signed(3)), - Error::::RetirementPeriodNotPassed - ); - System::set_block_number(System::block_number() + RetirementPeriod::get()); - assert_ok!(Alliance::retire(RuntimeOrigin::signed(3))); + assert_ok!(Alliance::retire(Origin::signed(3))); assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); - System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberRetired { - 
member: (3), - unreserved: None, - })); - - // Move time on: - System::set_block_number(System::block_number() + RetirementPeriod::get()); - - assert_powerless(RuntimeOrigin::signed(3)); }); } -fn assert_powerless(user: RuntimeOrigin) { - //vote / veto with a valid propsal - let cid = test_cid(); - let (proposal, _, _) = make_kick_member_proposal(42); - - assert_noop!(Alliance::init_members(user.clone(), vec![], vec![], vec![],), BadOrigin); - - assert_noop!( - Alliance::disband(user.clone(), DisbandWitness { voting_members: 3, ..Default::default() }), - BadOrigin - ); - - assert_noop!(Alliance::set_rule(user.clone(), cid.clone()), BadOrigin); - - assert_noop!(Alliance::retire(user.clone()), Error::::RetirementNoticeNotGiven); - - assert_noop!(Alliance::give_retirement_notice(user.clone()), Error::::NotMember); - - assert_noop!(Alliance::elevate_ally(user.clone(), 4), BadOrigin); - - assert_noop!(Alliance::kick_member(user.clone(), 1), BadOrigin); - - assert_noop!(Alliance::nominate_ally(user.clone(), 4), Error::::NoVotingRights); - - assert_noop!( - Alliance::propose(user.clone(), 5, Box::new(proposal), 1000), - Error::::NoVotingRights - ); -} - #[test] fn kick_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Alliance::kick_member(RuntimeOrigin::signed(4), 4), BadOrigin); - - assert_noop!( - Alliance::kick_member(RuntimeOrigin::signed(2), 4), - Error::::NotMember - ); - - >::insert(2, 25); + let proposal = make_kick_member_proposal(2); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + assert_ok!(Alliance::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_eq!(Alliance::up_for_kicking(2), true); assert_eq!(Alliance::members(MemberRole::Founder), vec![1, 2]); - assert_ok!(Alliance::kick_member(RuntimeOrigin::signed(2), 2)); + + assert_ok!(Alliance::kick_member(Origin::signed(2), 2)); assert_eq!(Alliance::members(MemberRole::Founder), vec![1]); - assert_eq!(>::get(2), None); - 
System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberKicked { - member: (2), - slashed: Some(25), - })); }); } #[test] fn add_unscrupulous_items_works() { new_test_ext().execute_with(|| { - assert_noop!(Alliance::add_unscrupulous_items(RuntimeOrigin::signed(2), vec![]), BadOrigin); - assert_ok!(Alliance::add_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![ UnscrupulousItem::AccountId(3), UnscrupulousItem::Website("abc".as_bytes().to_vec().try_into().unwrap()) @@ -637,7 +450,7 @@ fn add_unscrupulous_items_works() { assert_noop!( Alliance::add_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(3)] ), Error::::AlreadyUnscrupulous @@ -648,26 +461,21 @@ fn add_unscrupulous_items_works() { #[test] fn remove_unscrupulous_items_works() { new_test_ext().execute_with(|| { - assert_noop!( - Alliance::remove_unscrupulous_items(RuntimeOrigin::signed(2), vec![]), - BadOrigin - ); - assert_noop!( Alliance::remove_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(3)] ), Error::::NotListedAsUnscrupulous ); assert_ok!(Alliance::add_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); assert_eq!(Alliance::unscrupulous_accounts(), vec![3]); assert_ok!(Alliance::remove_unscrupulous_items( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); assert_eq!(Alliance::unscrupulous_accounts(), Vec::::new()); diff --git a/frame/alliance/src/types.rs b/frame/alliance/src/types.rs index 90f7ce41b9613..8fb0ae96fd02d 100644 --- a/frame/alliance/src/types.rs +++ b/frame/alliance/src/types.rs @@ -93,30 +93,3 @@ impl Cid { } } } - -/// Witness data for the `disband` call. 
-#[derive( - Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo, Default, -)] -pub struct DisbandWitness { - /// Total number of voting members in the current Alliance. - #[codec(compact)] - pub(super) voting_members: u32, - /// Total number of ally members in the current Alliance. - #[codec(compact)] - pub(super) ally_members: u32, -} - -#[cfg(test)] -impl DisbandWitness { - // Creates new DisbandWitness. - pub(super) fn new(voting_members: u32, ally_members: u32) -> Self { - Self { voting_members, ally_members } - } -} - -impl DisbandWitness { - pub(super) fn is_zero(self) -> bool { - self == Self::default() - } -} diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 29038efd2c25c..495dd1b83df93 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -1,13 +1,13 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,14 +18,12 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! DATE: 2021-10-11, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: -// ./target/production/substrate +// ./target/release/substrate // benchmark -// pallet // --chain=dev // --steps=50 // --repeat=20 @@ -35,9 +33,9 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/alliance/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs + #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -55,14 +53,12 @@ pub trait WeightInfo { fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight; fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight; fn init_members(x: u32, y: u32, z: u32, ) -> Weight; - fn disband(x: u32, y: u32, z: u32, ) -> Weight; fn set_rule() -> Weight; fn announce() -> Weight; fn remove_announcement() -> Weight; fn join_alliance() -> Weight; fn nominate_ally() -> Weight; fn elevate_ally() -> Weight; - fn give_retirement_notice() -> Weight; fn retire() -> Weight; fn kick_member() -> Weight; fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight; @@ -77,242 +73,173 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion ProposalCount (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[0, 90]`. - /// The range of component `p` is `[1, 100]`. fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 43_720 nanoseconds. 
- Weight::from_ref_time(44_766_307 as u64) - // Standard Error: 2_522 - .saturating_add(Weight::from_ref_time(54_721 as u64).saturating_mul(y as u64)) - // Standard Error: 2_301 - .saturating_add(Weight::from_ref_time(173_300 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (39_992_000 as Weight) + // Standard Error: 2_000 + .saturating_add((44_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((323_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) - /// The range of component `x` is `[3, 10]`. - /// The range of component `y` is `[2, 90]`. fn vote(x: u32, y: u32, ) -> Weight { - // Minimum execution time: 46_984 nanoseconds. - Weight::from_ref_time(46_837_255 as u64) - // Standard Error: 32_860 - .saturating_add(Weight::from_ref_time(273_691 as u64).saturating_mul(x as u64)) - // Standard Error: 2_781 - .saturating_add(Weight::from_ref_time(126_964 as u64).saturating_mul(y as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (36_649_000 as Weight) + // Standard Error: 90_000 + .saturating_add((42_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 3_000 + .saturating_add((195_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - // Minimum execution time: 34_734 nanoseconds. 
- Weight::from_ref_time(37_652_708 as u64) - // Standard Error: 1_270 - .saturating_add(Weight::from_ref_time(183_078 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (30_301_000 as Weight) + // Standard Error: 1_000 + .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:1) - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 50_147 nanoseconds. 
- Weight::from_ref_time(42_719_616 as u64) - // Standard Error: 19_981 - .saturating_add(Weight::from_ref_time(188_796 as u64).saturating_mul(x as u64)) - // Standard Error: 1_947 - .saturating_add(Weight::from_ref_time(95_998 as u64).saturating_mul(y as u64)) - // Standard Error: 1_739 - .saturating_add(Weight::from_ref_time(177_837 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (40_472_000 as Weight) + // Standard Error: 69_000 + .saturating_add((485_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 2_000 + .saturating_add((192_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 59_495 nanoseconds. 
- Weight::from_ref_time(53_137_721 as u64) - // Standard Error: 138 - .saturating_add(Weight::from_ref_time(1_979 as u64).saturating_mul(b as u64)) - // Standard Error: 16_388 - .saturating_add(Weight::from_ref_time(8_198 as u64).saturating_mul(x as u64)) - // Standard Error: 1_599 - .saturating_add(Weight::from_ref_time(86_577 as u64).saturating_mul(y as u64)) - // Standard Error: 1_428 - .saturating_add(Weight::from_ref_time(215_905 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (52_076_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 77_000 + .saturating_add((194_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 3_000 + .saturating_add((188_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((329_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:1) - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 52_405 nanoseconds. 
- Weight::from_ref_time(44_494_732 as u64) - // Standard Error: 1_759 - .saturating_add(Weight::from_ref_time(118_517 as u64).saturating_mul(y as u64)) - // Standard Error: 1_572 - .saturating_add(Weight::from_ref_time(198_256 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight { + (47_009_000 as Weight) + // Standard Error: 66_000 + .saturating_add((256_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 2_000 + .saturating_add((176_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((327_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 52_737 nanoseconds. 
- Weight::from_ref_time(45_874_458 as u64) - // Standard Error: 140 - .saturating_add(Weight::from_ref_time(601 as u64).saturating_mul(b as u64)) - // Standard Error: 1_623 - .saturating_add(Weight::from_ref_time(88_372 as u64).saturating_mul(y as u64)) - // Standard Error: 1_449 - .saturating_add(Weight::from_ref_time(197_595 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + (43_650_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 85_000 + .saturating_add((124_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 3_000 + .saturating_add((199_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 3_000 + .saturating_add((326_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:3 w:3) // Storage: AllianceMotion Members (r:1 w:1) - /// The range of component `x` is `[1, 10]`. - /// The range of component `y` is `[0, 90]`. - /// The range of component `z` is `[0, 100]`. - fn init_members(x: u32, y: u32, z: u32, ) -> Weight { - // Minimum execution time: 48_821 nanoseconds. 
- Weight::from_ref_time(32_972_152 as u64) - // Standard Error: 17_618 - .saturating_add(Weight::from_ref_time(230_451 as u64).saturating_mul(x as u64)) - // Standard Error: 1_865 - .saturating_add(Weight::from_ref_time(172_532 as u64).saturating_mul(y as u64)) - // Standard Error: 1_682 - .saturating_add(Weight::from_ref_time(145_258 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - } - // Storage: Alliance Members (r:3 w:3) - // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: Alliance DepositOf (r:101 w:50) - // Storage: System Account (r:50 w:50) - // Storage: AllianceMotion Members (r:0 w:1) - // Storage: AllianceMotion Prime (r:0 w:1) - /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[0, 100]`. - /// The range of component `z` is `[0, 50]`. - fn disband(x: u32, y: u32, z: u32, ) -> Weight { - // Minimum execution time: 256_235 nanoseconds. - Weight::from_ref_time(258_695_000 as u64) - // Standard Error: 19_643 - .saturating_add(Weight::from_ref_time(436_821 as u64).saturating_mul(x as u64)) - // Standard Error: 19_549 - .saturating_add(Weight::from_ref_time(496_858 as u64).saturating_mul(y as u64)) - // Standard Error: 39_062 - .saturating_add(Weight::from_ref_time(9_169_692 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(y as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(z as u64))) - .saturating_add(T::DbWeight::get().writes(5 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(z as u64))) + fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { + (45_100_000 as Weight) + // Standard Error: 4_000 + .saturating_add((162_000 as Weight).saturating_mul(y as Weight)) + // Standard 
Error: 4_000 + .saturating_add((151_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - // Minimum execution time: 19_205 nanoseconds. - Weight::from_ref_time(19_502_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (14_517_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - // Minimum execution time: 22_562 nanoseconds. - Weight::from_ref_time(22_842_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (16_801_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - // Minimum execution time: 23_773 nanoseconds. - Weight::from_ref_time(24_212_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (17_133_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) + // Storage: Alliance Members (r:4 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - // Minimum execution time: 57_709 nanoseconds. 
- Weight::from_ref_time(59_155_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (95_370_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } - // Storage: Alliance Members (r:4 w:1) + // Storage: Alliance Members (r:4 w:0) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - // Minimum execution time: 44_576 nanoseconds. - Weight::from_ref_time(45_162_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (44_764_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - // Minimum execution time: 38_913 nanoseconds. - Weight::from_ref_time(39_637_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (44_013_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } - // Storage: Alliance Members (r:4 w:2) + // Storage: Alliance KickingMembers (r:1 w:0) + // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: AllianceMotion Members (r:0 w:1) - // Storage: AllianceMotion Prime (r:0 w:1) - // Storage: Alliance RetiringMembers (r:0 w:1) - fn give_retirement_notice() -> Weight { - // Minimum execution time: 42_947 nanoseconds. 
- Weight::from_ref_time(43_414_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) - } - // Storage: Alliance RetiringMembers (r:1 w:1) - // Storage: Alliance Members (r:1 w:1) // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: AllianceMotion Members (r:0 w:1) + // Storage: AllianceMotion Prime (r:0 w:1) fn retire() -> Weight { - // Minimum execution time: 46_281 nanoseconds. - Weight::from_ref_time(46_703_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (60_183_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } + // Storage: Alliance KickingMembers (r:1 w:0) // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: Alliance DepositOf (r:1 w:1) @@ -320,38 +247,31 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - // Minimum execution time: 65_274 nanoseconds. - Weight::from_ref_time(65_762_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (67_467_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[0, 100]`. - /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - // Minimum execution time: 17_396 nanoseconds. 
- Weight::from_ref_time(17_638_000 as u64) - // Standard Error: 2_602 - .saturating_add(Weight::from_ref_time(1_286_177 as u64).saturating_mul(n as u64)) - // Standard Error: 1_019 - .saturating_add(Weight::from_ref_time(70_947 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (0 as Weight) + // Standard Error: 16_000 + .saturating_add((2_673_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 7_000 + .saturating_add((224_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[0, 100]`. - /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - // Minimum execution time: 17_446 nanoseconds. - Weight::from_ref_time(17_725_000 as u64) - // Standard Error: 163_579 - .saturating_add(Weight::from_ref_time(12_823_232 as u64).saturating_mul(n as u64)) - // Standard Error: 64_064 - .saturating_add(Weight::from_ref_time(496_642 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (0 as Weight) + // Standard Error: 343_000 + .saturating_add((59_025_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 153_000 + .saturating_add((6_725_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -362,242 +282,173 @@ impl WeightInfo for () { // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion ProposalCount (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `x` is `[2, 10]`. 
- /// The range of component `y` is `[0, 90]`. - /// The range of component `p` is `[1, 100]`. fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 43_720 nanoseconds. - Weight::from_ref_time(44_766_307 as u64) - // Standard Error: 2_522 - .saturating_add(Weight::from_ref_time(54_721 as u64).saturating_mul(y as u64)) - // Standard Error: 2_301 - .saturating_add(Weight::from_ref_time(173_300 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (39_992_000 as Weight) + // Standard Error: 2_000 + .saturating_add((44_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((323_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) - /// The range of component `x` is `[3, 10]`. - /// The range of component `y` is `[2, 90]`. fn vote(x: u32, y: u32, ) -> Weight { - // Minimum execution time: 46_984 nanoseconds. 
- Weight::from_ref_time(46_837_255 as u64) - // Standard Error: 32_860 - .saturating_add(Weight::from_ref_time(273_691 as u64).saturating_mul(x as u64)) - // Standard Error: 2_781 - .saturating_add(Weight::from_ref_time(126_964 as u64).saturating_mul(y as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (36_649_000 as Weight) + // Standard Error: 90_000 + .saturating_add((42_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 3_000 + .saturating_add((195_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - // Minimum execution time: 34_734 nanoseconds. - Weight::from_ref_time(37_652_708 as u64) - // Standard Error: 1_270 - .saturating_add(Weight::from_ref_time(183_078 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (30_301_000 as Weight) + // Standard Error: 1_000 + .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:1) - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 50_147 nanoseconds. - Weight::from_ref_time(42_719_616 as u64) - // Standard Error: 19_981 - .saturating_add(Weight::from_ref_time(188_796 as u64).saturating_mul(x as u64)) - // Standard Error: 1_947 - .saturating_add(Weight::from_ref_time(95_998 as u64).saturating_mul(y as u64)) - // Standard Error: 1_739 - .saturating_add(Weight::from_ref_time(177_837 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (40_472_000 as Weight) + // Standard Error: 69_000 + .saturating_add((485_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 2_000 + .saturating_add((192_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 59_495 nanoseconds. 
- Weight::from_ref_time(53_137_721 as u64) - // Standard Error: 138 - .saturating_add(Weight::from_ref_time(1_979 as u64).saturating_mul(b as u64)) - // Standard Error: 16_388 - .saturating_add(Weight::from_ref_time(8_198 as u64).saturating_mul(x as u64)) - // Standard Error: 1_599 - .saturating_add(Weight::from_ref_time(86_577 as u64).saturating_mul(y as u64)) - // Standard Error: 1_428 - .saturating_add(Weight::from_ref_time(215_905 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (52_076_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 77_000 + .saturating_add((194_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 3_000 + .saturating_add((188_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((329_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:1) - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 52_405 nanoseconds. 
- Weight::from_ref_time(44_494_732 as u64) - // Standard Error: 1_759 - .saturating_add(Weight::from_ref_time(118_517 as u64).saturating_mul(y as u64)) - // Standard Error: 1_572 - .saturating_add(Weight::from_ref_time(198_256 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight { + (47_009_000 as Weight) + // Standard Error: 66_000 + .saturating_add((256_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 2_000 + .saturating_add((176_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 2_000 + .saturating_add((327_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `x` is `[2, 10]`. - /// The range of component `y` is `[2, 90]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - // Minimum execution time: 52_737 nanoseconds. 
- Weight::from_ref_time(45_874_458 as u64) - // Standard Error: 140 - .saturating_add(Weight::from_ref_time(601 as u64).saturating_mul(b as u64)) - // Standard Error: 1_623 - .saturating_add(Weight::from_ref_time(88_372 as u64).saturating_mul(y as u64)) - // Standard Error: 1_449 - .saturating_add(Weight::from_ref_time(197_595 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + (43_650_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 85_000 + .saturating_add((124_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 3_000 + .saturating_add((199_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 3_000 + .saturating_add((326_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:3 w:3) // Storage: AllianceMotion Members (r:1 w:1) - /// The range of component `x` is `[1, 10]`. - /// The range of component `y` is `[0, 90]`. - /// The range of component `z` is `[0, 100]`. - fn init_members(x: u32, y: u32, z: u32, ) -> Weight { - // Minimum execution time: 48_821 nanoseconds. 
- Weight::from_ref_time(32_972_152 as u64) - // Standard Error: 17_618 - .saturating_add(Weight::from_ref_time(230_451 as u64).saturating_mul(x as u64)) - // Standard Error: 1_865 - .saturating_add(Weight::from_ref_time(172_532 as u64).saturating_mul(y as u64)) - // Standard Error: 1_682 - .saturating_add(Weight::from_ref_time(145_258 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - } - // Storage: Alliance Members (r:3 w:3) - // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: Alliance DepositOf (r:101 w:50) - // Storage: System Account (r:50 w:50) - // Storage: AllianceMotion Members (r:0 w:1) - // Storage: AllianceMotion Prime (r:0 w:1) - /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[0, 100]`. - /// The range of component `z` is `[0, 50]`. - fn disband(x: u32, y: u32, z: u32, ) -> Weight { - // Minimum execution time: 256_235 nanoseconds. - Weight::from_ref_time(258_695_000 as u64) - // Standard Error: 19_643 - .saturating_add(Weight::from_ref_time(436_821 as u64).saturating_mul(x as u64)) - // Standard Error: 19_549 - .saturating_add(Weight::from_ref_time(496_858 as u64).saturating_mul(y as u64)) - // Standard Error: 39_062 - .saturating_add(Weight::from_ref_time(9_169_692 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(y as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(z as u64))) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(z as u64))) + fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { + (45_100_000 as Weight) + // Standard Error: 4_000 + .saturating_add((162_000 as Weight).saturating_mul(y as Weight)) 
+ // Standard Error: 4_000 + .saturating_add((151_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - // Minimum execution time: 19_205 nanoseconds. - Weight::from_ref_time(19_502_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (14_517_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - // Minimum execution time: 22_562 nanoseconds. - Weight::from_ref_time(22_842_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (16_801_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - // Minimum execution time: 23_773 nanoseconds. - Weight::from_ref_time(24_212_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (17_133_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) + // Storage: Alliance Members (r:4 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - // Minimum execution time: 57_709 nanoseconds. 
- Weight::from_ref_time(59_155_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (95_370_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } - // Storage: Alliance Members (r:4 w:1) + // Storage: Alliance Members (r:4 w:0) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - // Minimum execution time: 44_576 nanoseconds. - Weight::from_ref_time(45_162_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (44_764_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - // Minimum execution time: 38_913 nanoseconds. - Weight::from_ref_time(39_637_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (44_013_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } - // Storage: Alliance Members (r:4 w:2) + // Storage: Alliance KickingMembers (r:1 w:0) + // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: AllianceMotion Members (r:0 w:1) - // Storage: AllianceMotion Prime (r:0 w:1) - // Storage: Alliance RetiringMembers (r:0 w:1) - fn give_retirement_notice() -> Weight { - // Minimum execution time: 42_947 nanoseconds. 
- Weight::from_ref_time(43_414_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) - } - // Storage: Alliance RetiringMembers (r:1 w:1) - // Storage: Alliance Members (r:1 w:1) // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: AllianceMotion Members (r:0 w:1) + // Storage: AllianceMotion Prime (r:0 w:1) fn retire() -> Weight { - // Minimum execution time: 46_281 nanoseconds. - Weight::from_ref_time(46_703_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (60_183_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } + // Storage: Alliance KickingMembers (r:1 w:0) // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: Alliance DepositOf (r:1 w:1) @@ -605,37 +456,30 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - // Minimum execution time: 65_274 nanoseconds. - Weight::from_ref_time(65_762_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (67_467_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[0, 100]`. - /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - // Minimum execution time: 17_396 nanoseconds. 
- Weight::from_ref_time(17_638_000 as u64) - // Standard Error: 2_602 - .saturating_add(Weight::from_ref_time(1_286_177 as u64).saturating_mul(n as u64)) - // Standard Error: 1_019 - .saturating_add(Weight::from_ref_time(70_947 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (0 as Weight) + // Standard Error: 16_000 + .saturating_add((2_673_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 7_000 + .saturating_add((224_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[0, 100]`. - /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - // Minimum execution time: 17_446 nanoseconds. 
- Weight::from_ref_time(17_725_000 as u64) - // Standard Error: 163_579 - .saturating_add(Weight::from_ref_time(12_823_232 as u64).saturating_mul(n as u64)) - // Standard Error: 64_064 - .saturating_add(Weight::from_ref_time(496_642 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (0 as Weight) + // Standard Error: 343_000 + .saturating_add((59_025_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 153_000 + .saturating_add((6_725_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 7e750f7618437..9e98d4e15aed4 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -39,7 +39,7 @@ std = [ "sp-runtime/std", "frame-support/std", "frame-system/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 2bde2b0c98945..ca88899edf842 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -37,7 +37,7 @@ const SEED: u32 = 0; fn create_default_asset, I: 'static>( is_sufficient: bool, -) -> (T::AccountId, AccountIdLookupOf) { +) -> (T::AccountId, ::Source) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let root = SystemOrigin::Root.into(); @@ -55,7 +55,7 @@ fn create_default_asset, I: 'static>( fn create_default_minted_asset, I: 'static>( is_sufficient: bool, amount: T::Balance, -) -> (T::AccountId, AccountIdLookupOf) { +) -> (T::AccountId, ::Source) { let (caller, caller_lookup) = create_default_asset::(is_sufficient); if !is_sufficient { T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); @@ -140,24 
+140,22 @@ fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { } } -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn assert_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_has_event(generic_event.into()); } benchmarks_instance_pallet! { create { - let asset_id = Default::default(); - let origin = T::CreateOrigin::successful_origin(&asset_id); - let caller = T::CreateOrigin::ensure_origin(origin, &asset_id).unwrap(); + let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - }: _(SystemOrigin::Signed(caller.clone()), asset_id, caller_lookup, 1u32.into()) + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1u32.into()) verify { - assert_last_event::(Event::Created { asset_id, creator: caller.clone(), owner: caller }.into()); + assert_last_event::(Event::Created { asset_id: Default::default(), creator: caller.clone(), owner: caller }.into()); } force_create { diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index 3524efb302ace..bd55855d388d4 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -272,23 +272,3 @@ impl, I: 'static> fungibles::InspectEnumerable for Pa Box::new(Asset::::iter_keys()) } } - -impl, I: 'static> fungibles::roles::Inspect<::AccountId> - for Pallet -{ - fn owner(asset: T::AssetId) -> Option<::AccountId> { - Asset::::get(asset).map(|x| x.owner) - } - - fn issuer(asset: T::AssetId) -> Option<::AccountId> { - Asset::::get(asset).map(|x| x.issuer) - } - - fn admin(asset: T::AssetId) -> Option<::AccountId> { - Asset::::get(asset).map(|x| x.admin) - } - - fn freezer(asset: 
T::AssetId) -> Option<::AccountId> { - Asset::::get(asset).map(|x| x.freezer) - } -} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 7e7d68fa6c7dd..e0b00c5642c81 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -36,15 +36,15 @@ //! //! ### Terminology //! -//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and its +//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and it's //! assets, as well as forcibly transfer a particular class of assets between arbitrary accounts //! and reduce the balance of a particular class of assets of arbitrary accounts. //! * **Asset issuance/minting**: The creation of a new asset, whose total supply will belong to the -//! account designated as the beneficiary of the asset. This is a privileged operation. +//! account that issues the asset. This is a privileged operation. //! * **Asset transfer**: The reduction of the balance of an asset of one account with the //! corresponding increase in the balance of another. -//! * **Asset destruction**: The process of reducing the balance of an asset of one account. This is -//! a privileged operation. +//! * **Asset destruction**: The process of reduce the balance of an asset of one account. This is a +//! privileged operation. //! * **Fungible asset**: An asset whose units are interchangeable. //! * **Issuer**: An account ID uniquely privileged to be able to mint a particular class of assets. //! * **Freezer**: An account ID uniquely privileged to be able to freeze an account from @@ -63,12 +63,12 @@ //! //! The assets system in Substrate is designed to make the following possible: //! -//! * Issue new assets in a permissioned or permissionless way, if permissionless, then with a +//! * Issue a new assets in a permissioned or permissionless way, if permissionless, then with a //! deposit required. //! 
* Allow accounts to be delegated the ability to transfer assets without otherwise existing //! on-chain (*approvals*). //! * Move assets between accounts. -//! * Update an asset class's total supply. +//! * Update the asset's total supply. //! * Allow administrative activities by specially privileged accounts including freezing account //! balances and minting/burning assets. //! @@ -92,7 +92,6 @@ //! * `force_cancel_approval`: Rescind a previous approval. //! //! ### Privileged Functions -//! //! * `destroy`: Destroys an entire asset class; called by the asset class's Owner. //! * `mint`: Increases the asset balance of an account; called by the asset class's Issuer. //! * `burn`: Decreases the asset balance of an account; called by the asset class's Admin. @@ -157,7 +156,7 @@ use frame_support::{ traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, - Currency, EnsureOriginWithArg, ReservableCurrency, StoredMap, + Currency, ReservableCurrency, StoredMap, }, }; use frame_system::Config as SystemConfig; @@ -165,8 +164,6 @@ use frame_system::Config as SystemConfig; pub use pallet::*; pub use weights::WeightInfo; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -181,8 +178,7 @@ pub mod pallet { /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The units in which we record balances. type Balance: Member @@ -207,17 +203,9 @@ pub mod pallet { /// The currency mechanism. type Currency: ReservableCurrency; - /// Standard asset class creation is only allowed if the origin attempting it and the - /// asset class are in this set. 
- type CreateOrigin: EnsureOriginWithArg< - Self::RuntimeOrigin, - Self::AssetId, - Success = Self::AccountId, - >; - /// The origin which may forcibly create or destroy an asset or otherwise alter privileged /// attributes. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The basic amount of funds that must be reserved for an asset. #[pallet::constant] @@ -494,7 +482,7 @@ pub mod pallet { /// /// This new asset class has no assets initially and its owner is the origin. /// - /// The origin must conform to the configured `CreateOrigin` and have sufficient funds free. + /// The origin must be Signed and the sender must have sufficient funds free. /// /// Funds of sender are reserved by `AssetDeposit`. /// @@ -513,10 +501,10 @@ pub mod pallet { pub fn create( origin: OriginFor, #[pallet::compact] id: T::AssetId, - admin: AccountIdLookupOf, + admin: ::Source, min_balance: T::Balance, ) -> DispatchResult { - let owner = T::CreateOrigin::ensure_origin(origin, &id)?; + let owner = ensure_signed(origin)?; let admin = T::Lookup::lookup(admin)?; ensure!(!Asset::::contains_key(id), Error::::InUse); @@ -569,7 +557,7 @@ pub mod pallet { pub fn force_create( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: AccountIdLookupOf, + owner: ::Source, is_sufficient: bool, #[pallet::compact] min_balance: T::Balance, ) -> DispatchResult { @@ -635,7 +623,7 @@ pub mod pallet { pub fn mint( origin: OriginFor, #[pallet::compact] id: T::AssetId, - beneficiary: AccountIdLookupOf, + beneficiary: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -663,7 +651,7 @@ pub mod pallet { pub fn burn( origin: OriginFor, #[pallet::compact] id: T::AssetId, - who: AccountIdLookupOf, + who: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -696,7 +684,7 @@ pub mod pallet { pub fn transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, - target: 
AccountIdLookupOf, + target: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -728,7 +716,7 @@ pub mod pallet { pub fn transfer_keep_alive( origin: OriginFor, #[pallet::compact] id: T::AssetId, - target: AccountIdLookupOf, + target: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let source = ensure_signed(origin)?; @@ -761,8 +749,8 @@ pub mod pallet { pub fn force_transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, - source: AccountIdLookupOf, - dest: AccountIdLookupOf, + source: ::Source, + dest: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -787,7 +775,7 @@ pub mod pallet { pub fn freeze( origin: OriginFor, #[pallet::compact] id: T::AssetId, - who: AccountIdLookupOf, + who: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -818,7 +806,7 @@ pub mod pallet { pub fn thaw( origin: OriginFor, #[pallet::compact] id: T::AssetId, - who: AccountIdLookupOf, + who: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -903,7 +891,7 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: AccountIdLookupOf, + owner: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -944,9 +932,9 @@ pub mod pallet { pub fn set_team( origin: OriginFor, #[pallet::compact] id: T::AssetId, - issuer: AccountIdLookupOf, - admin: AccountIdLookupOf, - freezer: AccountIdLookupOf, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; let issuer = T::Lookup::lookup(issuer)?; @@ -1129,10 +1117,10 @@ pub mod pallet { pub fn force_asset_status( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: AccountIdLookupOf, - issuer: AccountIdLookupOf, - admin: AccountIdLookupOf, - freezer: AccountIdLookupOf, + owner: ::Source, + 
issuer: ::Source, + admin: ::Source, + freezer: ::Source, #[pallet::compact] min_balance: T::Balance, is_sufficient: bool, is_frozen: bool, @@ -1179,7 +1167,7 @@ pub mod pallet { pub fn approve_transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, - delegate: AccountIdLookupOf, + delegate: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let owner = ensure_signed(origin)?; @@ -1204,7 +1192,7 @@ pub mod pallet { pub fn cancel_approval( origin: OriginFor, #[pallet::compact] id: T::AssetId, - delegate: AccountIdLookupOf, + delegate: ::Source, ) -> DispatchResult { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; @@ -1237,8 +1225,8 @@ pub mod pallet { pub fn force_cancel_approval( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: AccountIdLookupOf, - delegate: AccountIdLookupOf, + owner: ::Source, + delegate: ::Source, ) -> DispatchResult { let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; T::ForceOrigin::try_origin(origin) @@ -1284,8 +1272,8 @@ pub mod pallet { pub fn transfer_approved( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: AccountIdLookupOf, - destination: AccountIdLookupOf, + owner: ::Source, + destination: ::Source, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let delegate = ensure_signed(origin)?; diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 21fb52c9cd931..67690e2b28ec1 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -21,8 +21,8 @@ use super::*; use crate as pallet_assets; use frame_support::{ - construct_runtime, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32, ConstU64, GenesisBuild}, + construct_runtime, + traits::{ConstU32, ConstU64, GenesisBuild}, }; use sp_core::H256; use sp_runtime::{ @@ -49,8 +49,8 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = 
RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -75,7 +75,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -85,11 +85,10 @@ impl pallet_balances::Config for Test { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Balance = u64; type AssetId = u32; type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; type AssetDeposit = ConstU64<1>; type AssetAccountDeposit = ConstU64<10>; @@ -102,49 +101,44 @@ impl Config for Test { type Extra = (); } -use std::collections::HashMap; +use std::{cell::RefCell, collections::HashMap}; #[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum Hook { +pub(crate) enum Hook { Died(u32, u64), } -parameter_types! { - static Frozen: HashMap<(u32, u64), u64> = Default::default(); - static Hooks: Vec = Default::default(); +thread_local! 
{ + static FROZEN: RefCell> = RefCell::new(Default::default()); + static HOOKS: RefCell> = RefCell::new(Default::default()); } pub struct TestFreezer; impl FrozenBalance for TestFreezer { fn frozen_balance(asset: u32, who: &u64) -> Option { - Frozen::get().get(&(asset, *who)).cloned() + FROZEN.with(|f| f.borrow().get(&(asset, who.clone())).cloned()) } fn died(asset: u32, who: &u64) { - Hooks::mutate(|v| v.push(Hook::Died(asset, *who))); - + HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, who.clone()))); // Sanity check: dead accounts have no balance. assert!(Assets::balance(asset, *who).is_zero()); } } pub(crate) fn set_frozen_balance(asset: u32, who: u64, amount: u64) { - Frozen::mutate(|v| { - v.insert((asset, who), amount); - }); + FROZEN.with(|f| f.borrow_mut().insert((asset, who), amount)); } pub(crate) fn clear_frozen_balance(asset: u32, who: u64) { - Frozen::mutate(|v| { - v.remove(&(asset, who)); - }); + FROZEN.with(|f| f.borrow_mut().remove(&(asset, who))); } pub(crate) fn hooks() -> Vec { - Hooks::get().clone() + HOOKS.with(|h| h.borrow().clone()) } pub(crate) fn take_hooks() -> Vec { - Hooks::take() + HOOKS.with(|h| h.take()) } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 48cfad45a49fc..50ab04111edff 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -26,10 +26,10 @@ use sp_runtime::{traits::ConvertInto, TokenError}; #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 2), 
100); }); } @@ -37,46 +37,46 @@ fn basic_minting_should_work() { #[test] fn minting_too_many_insufficient_assets_fails() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 1, 1, false, 1)); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 2, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 1, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 2, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 1, 1, 100)); - assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 1, 1, 100)); + assert_noop!(Assets::mint(Origin::signed(1), 2, 1, 100), TokenError::CannotCreate); Balances::make_free_balance_be(&2, 1); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 100)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 2, 1, 100)); }); } #[test] fn minting_insufficient_asset_with_deposit_should_work_when_consumers_exhausted() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 1, 1, false, 1)); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 2, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 1, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 2, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 
1, 100)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 1, 1, 100)); - assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 1, 1, 100)); + assert_noop!(Assets::mint(Origin::signed(1), 2, 1, 100), TokenError::CannotCreate); - assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 2)); + assert_ok!(Assets::touch(Origin::signed(1), 2)); assert_eq!(Balances::reserved_balance(&1), 10); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 2, 1, 100)); }); } #[test] fn minting_insufficient_assets_with_deposit_without_consumer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); - assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::touch(Origin::signed(1), 0)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Balances::reserved_balance(&1), 10); assert_eq!(System::consumers(&1), 0); }); @@ -85,11 +85,11 @@ fn minting_insufficient_assets_with_deposit_without_consumer_should_work() { #[test] fn refunding_asset_deposit_with_burn_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - 
assert_ok!(Assets::refund(RuntimeOrigin::signed(1), 0, true)); + assert_ok!(Assets::touch(Origin::signed(1), 0)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::refund(Origin::signed(1), 0, true)); assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Assets::balance(1, 0), 0); }); @@ -98,28 +98,28 @@ fn refunding_asset_deposit_with_burn_should_work() { #[test] fn refunding_asset_deposit_with_burn_disallowed_should_fail() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - assert_noop!(Assets::refund(RuntimeOrigin::signed(1), 0, false), Error::::WouldBurn); + assert_ok!(Assets::touch(Origin::signed(1), 0)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_noop!(Assets::refund(Origin::signed(1), 0, false), Error::::WouldBurn); }); } #[test] fn refunding_asset_deposit_without_burn_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); - assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::touch(Origin::signed(1), 0)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&2, 100); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 100)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 2), 100); 
assert_eq!(Assets::balance(0, 1), 0); assert_eq!(Balances::reserved_balance(&1), 10); - assert_ok!(Assets::refund(RuntimeOrigin::signed(1), 0, false)); + assert_ok!(Assets::refund(Origin::signed(1), 0, false)); assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Assets::balance(1, 0), 0); }); @@ -129,11 +129,11 @@ fn refunding_asset_deposit_without_burn_should_work() { #[test] fn refunding_calls_died_hook() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - assert_ok!(Assets::refund(RuntimeOrigin::signed(1), 0, true)); + assert_ok!(Assets::touch(Origin::signed(1), 0)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::refund(Origin::signed(1), 0, true)); assert_eq!(Asset::::get(0).unwrap().accounts, 0); assert_eq!(hooks(), vec![Hook::Died(0, 1)]); @@ -144,20 +144,17 @@ fn refunding_calls_died_hook() { fn approval_lifecycle_works() { new_test_ext().execute_with(|| { // can't approve non-existent token - assert_noop!( - Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), - Error::::Unknown - ); + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); // so we create it :) - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 
1); - assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 40)); + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 40)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_ok!(Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2)); + assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_eq!(Assets::balance(0, 1), 60); assert_eq!(Assets::balance(0, 3), 40); @@ -169,20 +166,17 @@ fn approval_lifecycle_works() { fn transfer_approved_all_funds() { new_test_ext().execute_with(|| { // can't approve non-existent token - assert_noop!( - Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), - Error::::Unknown - ); + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); // so we create it :) - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 1); // transfer the full amount, which should trigger auto-cleanup - assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 50)); + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 3), 50); @@ -193,20 +187,20 @@ fn transfer_approved_all_funds() { #[test] fn approval_deposits_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 
100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); let e = BalancesError::::InsufficientBalance; - assert_noop!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), e); + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), e); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Balances::reserved_balance(&1), 1); - assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 50)); + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); assert_eq!(Balances::reserved_balance(&1), 0); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); - assert_ok!(Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); assert_eq!(Balances::reserved_balance(&1), 0); }); } @@ -214,84 +208,72 @@ fn approval_deposits_work() { #[test] fn cannot_transfer_more_than_approved() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); let e = Error::::Unapproved; - assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 51), e); + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 51), e); }); } #[test] fn cannot_transfer_more_than_exists() { new_test_ext().execute_with(|| { - 
assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 101)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 101)); let e = Error::::BalanceLow; - assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 101), e); + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 101), e); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_noop!( - Assets::cancel_approval(RuntimeOrigin::signed(1), 1, 2), - Error::::Unknown - ); - assert_noop!( - Assets::cancel_approval(RuntimeOrigin::signed(2), 0, 2), - Error::::Unknown - ); - assert_noop!( - Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 3), - Error::::Unknown - ); + assert_noop!(Assets::cancel_approval(Origin::signed(1), 1, 2), Error::::Unknown); + assert_noop!(Assets::cancel_approval(Origin::signed(2), 0, 2), Error::::Unknown); + assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 3), Error::::Unknown); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_ok!(Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2)); + assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); assert_eq!(Asset::::get(0).unwrap().approvals, 
0); - assert_noop!( - Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2), - Error::::Unknown - ); + assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 2), Error::::Unknown); }); } #[test] fn force_cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); let e = Error::::NoPermission; - assert_noop!(Assets::force_cancel_approval(RuntimeOrigin::signed(2), 0, 1, 2), e); + assert_noop!(Assets::force_cancel_approval(Origin::signed(2), 0, 1, 2), e); assert_noop!( - Assets::force_cancel_approval(RuntimeOrigin::signed(1), 1, 1, 2), + Assets::force_cancel_approval(Origin::signed(1), 1, 1, 2), Error::::Unknown ); assert_noop!( - Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 2, 2), + Assets::force_cancel_approval(Origin::signed(1), 0, 2, 2), Error::::Unknown ); assert_noop!( - Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 1, 3), + Assets::force_cancel_approval(Origin::signed(1), 0, 1, 3), Error::::Unknown ); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_ok!(Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 1, 2)); + assert_ok!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_noop!( - Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 1, 2), + Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2), Error::::Unknown ); }); @@ -301,42 +283,42 @@ fn force_cancel_approval_works() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { 
Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); assert_eq!(Balances::reserved_balance(&1), 1); assert!(Asset::::contains_key(0)); - assert_ok!(Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0], vec![0], 12)); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); assert_eq!(Balances::reserved_balance(&1), 4); assert!(Metadata::::contains_key(0)); Balances::make_free_balance_be(&10, 100); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); Balances::make_free_balance_be(&20, 100); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 20, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); assert_eq!(Account::::iter_prefix(0).count(), 2); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Asset::::contains_key(0)); assert!(!Metadata::::contains_key(0)); assert_eq!(Account::::iter_prefix(0).count(), 0); - assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); assert_eq!(Balances::reserved_balance(&1), 1); assert!(Asset::::contains_key(0)); - assert_ok!(Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0], vec![0], 12)); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); assert_eq!(Balances::reserved_balance(&1), 4); assert!(Metadata::::contains_key(0)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 20, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); assert_eq!(Account::::iter_prefix(0).count(), 2); let w = 
Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(RuntimeOrigin::root(), 0, w)); + assert_ok!(Assets::destroy(Origin::root(), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Asset::::contains_key(0)); @@ -349,15 +331,15 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); let mut w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); // witness too low - assert_noop!(Assets::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); + assert_noop!(Assets::destroy(Origin::signed(1), 0, w), Error::::BadWitness); // witness too high is okay though w.accounts += 2; w.sufficients += 2; - assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); }); } @@ -365,15 +347,15 @@ fn destroy_with_bad_witness_should_not_work() { fn destroy_should_refund_approvals() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 3, 50)); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 4, 50)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 3, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 4, 50)); 
assert_eq!(Balances::reserved_balance(&1), 3); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); // all approvals are removed @@ -384,72 +366,69 @@ fn destroy_should_refund_approvals() { #[test] fn non_providing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&0, 100); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 0, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); // Cannot mint into account 2 since it doesn't (yet) exist... - assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100), TokenError::CannotCreate); + assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); // ...or transfer... - assert_noop!( - Assets::transfer(RuntimeOrigin::signed(0), 0, 1, 50), - TokenError::CannotCreate - ); + assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), TokenError::CannotCreate); // ...or force-transfer assert_noop!( - Assets::force_transfer(RuntimeOrigin::signed(1), 0, 0, 1, 50), + Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), TokenError::CannotCreate ); Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(0), 0, 1, 25)); - assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 0, 2, 25)); + assert_ok!(Assets::transfer(Origin::signed(0), 0, 1, 25)); + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 0, 2, 25)); }); } #[test] fn min_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 10)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + 
assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Asset::::get(0).unwrap().accounts, 1); // Cannot create a new account with a balance that is below minimum... - assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 9), TokenError::BelowMinimum); - assert_noop!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); assert_noop!( - Assets::force_transfer(RuntimeOrigin::signed(1), 0, 1, 2, 9), + Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), TokenError::BelowMinimum ); // When deducting from an account to below minimum, it should be reaped. // Death by `transfer`. - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 91)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); assert!(Assets::maybe_balance(0, 1).is_none()); assert_eq!(Assets::balance(0, 2), 100); assert_eq!(Asset::::get(0).unwrap().accounts, 1); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); // Death by `force_transfer`. - assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 2, 1, 91)); + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 2, 1, 91)); assert!(Assets::maybe_balance(0, 2).is_none()); assert_eq!(Assets::balance(0, 1), 100); assert_eq!(Asset::::get(0).unwrap().accounts, 1); assert_eq!(take_hooks(), vec![Hook::Died(0, 2)]); // Death by `burn`. - assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, 91)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 91)); assert!(Assets::maybe_balance(0, 1).is_none()); assert_eq!(Asset::::get(0).unwrap().accounts, 0); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); // Death by `transfer_approved`. 
- assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 100)); - assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 91)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 91)); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); }); } @@ -457,17 +436,17 @@ fn min_balance_should_work() { #[test] fn querying_total_supply_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 3, 31)); + assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 19); assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 3, u64::MAX)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::MAX)); assert_eq!(Assets::total_supply(0), 69); }); } @@ -475,10 +454,10 @@ fn querying_total_supply_should_work() { #[test] fn transferring_amount_below_available_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + 
assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); }); @@ -487,14 +466,14 @@ fn transferring_amount_below_available_balance_should_work() { #[test] fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 10)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_noop!( - Assets::transfer_keep_alive(RuntimeOrigin::signed(1), 0, 2, 91), + Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), Error::::BalanceLow ); - assert_ok!(Assets::transfer_keep_alive(RuntimeOrigin::signed(1), 0, 2, 90)); + assert_ok!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 90)); assert_eq!(Assets::balance(0, 1), 10); assert_eq!(Assets::balance(0, 2), 90); assert!(hooks().is_empty()); @@ -504,26 +483,26 @@ fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { #[test] fn transferring_frozen_user_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::freeze(RuntimeOrigin::signed(1), 0, 1)); - assert_noop!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50), Error::::Frozen); - assert_ok!(Assets::thaw(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + 
assert_ok!(Assets::freeze(Origin::signed(1), 0, 1)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw(Origin::signed(1), 0, 1)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); }); } #[test] fn transferring_frozen_asset_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::freeze_asset(RuntimeOrigin::signed(1), 0)); - assert_noop!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50), Error::::Frozen); - assert_ok!(Assets::thaw_asset(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); }); } @@ -531,48 +510,36 @@ fn transferring_frozen_asset_should_not_work() { fn approve_transfer_frozen_asset_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::freeze_asset(RuntimeOrigin::signed(1), 0)); - assert_noop!( - Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), - Error::::Frozen - ); - assert_ok!(Assets::thaw_asset(RuntimeOrigin::signed(1), 0)); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); 
+ assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); }); } #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - assert_noop!( - Assets::transfer_ownership(RuntimeOrigin::signed(2), 0, 2), - Error::::NoPermission - ); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_noop!( - Assets::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), + Assets::transfer_ownership(Origin::signed(2), 0, 2), Error::::NoPermission ); - assert_noop!(Assets::freeze(RuntimeOrigin::signed(2), 0, 1), Error::::NoPermission); - assert_noop!(Assets::thaw(RuntimeOrigin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Assets::freeze(Origin::signed(2), 0, 1), Error::::NoPermission); + assert_noop!(Assets::thaw(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::mint(Origin::signed(2), 0, 2, 100), Error::::NoPermission); + assert_noop!(Assets::burn(Origin::signed(2), 0, 1, 100), Error::::NoPermission); assert_noop!( - Assets::mint(RuntimeOrigin::signed(2), 0, 2, 100), - Error::::NoPermission - ); - assert_noop!( - Assets::burn(RuntimeOrigin::signed(2), 0, 1, 100), - Error::::NoPermission - ); - assert_noop!( - Assets::force_transfer(RuntimeOrigin::signed(2), 0, 1, 2, 100), + Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100), Error::::NoPermission ); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_noop!(Assets::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); + 
assert_noop!(Assets::destroy(Origin::signed(2), 0, w), Error::::NoPermission); }); } @@ -581,28 +548,22 @@ fn transfer_owner_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); - assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); assert_eq!(Balances::reserved_balance(&1), 1); - assert_ok!(Assets::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); + assert_ok!(Assets::transfer_ownership(Origin::signed(1), 0, 2)); assert_eq!(Balances::reserved_balance(&2), 1); assert_eq!(Balances::reserved_balance(&1), 0); assert_noop!( - Assets::transfer_ownership(RuntimeOrigin::signed(1), 0, 1), + Assets::transfer_ownership(Origin::signed(1), 0, 1), Error::::NoPermission ); // Set metadata now and make sure that deposit gets transferred back. - assert_ok!(Assets::set_metadata( - RuntimeOrigin::signed(2), - 0, - vec![0u8; 10], - vec![0u8; 10], - 12 - )); - assert_ok!(Assets::transfer_ownership(RuntimeOrigin::signed(2), 0, 1)); + assert_ok!(Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12)); + assert_ok!(Assets::transfer_ownership(Origin::signed(2), 0, 1)); assert_eq!(Balances::reserved_balance(&1), 22); assert_eq!(Balances::reserved_balance(&2), 0); }); @@ -611,27 +572,27 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); - - assert_ok!(Assets::mint(RuntimeOrigin::signed(2), 0, 2, 100)); - assert_ok!(Assets::freeze(RuntimeOrigin::signed(4), 0, 2)); - assert_ok!(Assets::thaw(RuntimeOrigin::signed(3), 0, 2)); - assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(3), 0, 2, 3, 100)); - assert_ok!(Assets::burn(RuntimeOrigin::signed(3), 0, 3, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); 
+ assert_ok!(Assets::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Assets::mint(Origin::signed(2), 0, 2, 100)); + assert_ok!(Assets::freeze(Origin::signed(4), 0, 2)); + assert_ok!(Assets::thaw(Origin::signed(3), 0, 2)); + assert_ok!(Assets::force_transfer(Origin::signed(3), 0, 2, 3, 100)); + assert_ok!(Assets::burn(Origin::signed(3), 0, 3, 100)); }); } #[test] fn transferring_to_frozen_account_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_eq!(Assets::balance(0, 2), 100); - assert_ok!(Assets::freeze(RuntimeOrigin::signed(1), 0, 2)); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 2)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 2), 150); }); } @@ -639,32 +600,26 @@ fn transferring_to_frozen_account_should_work() { #[test] fn transferring_amount_more_than_available_balance_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, u64::MAX)); + 
assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); assert_eq!(Assets::balance(0, 1), 0); - assert_noop!( - Assets::transfer(RuntimeOrigin::signed(1), 0, 1, 50), - Error::::NoAccount - ); - assert_noop!( - Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 51), - Error::::BalanceLow - ); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::NoAccount); + assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 51), Error::::BalanceLow); }); } #[test] fn transferring_less_than_one_unit_is_fine() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 0)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 0)); // `ForceCreated` and `Issued` but no `Transferred` event. 
assert_eq!(System::events().len(), 2); }); @@ -673,23 +628,20 @@ fn transferring_less_than_one_unit_is_fine() { #[test] fn transferring_more_units_than_total_supply_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_noop!( - Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 101), - Error::::BalanceLow - ); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); }); } #[test] fn burning_asset_balance_with_positive_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, u64::MAX)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); assert_eq!(Assets::balance(0, 1), 0); }); } @@ -697,13 +649,10 @@ fn burning_asset_balance_with_positive_balance_should_work() { #[test] fn burning_asset_balance_with_zero_balance_does_nothing() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_noop!( - Assets::burn(RuntimeOrigin::signed(1), 0, 2, u64::MAX), - Error::::NoAccount - ); + assert_noop!(Assets::burn(Origin::signed(1), 0, 2, u64::MAX), Error::::NoAccount); 
assert_eq!(Assets::balance(0, 2), 0); assert_eq!(Assets::total_supply(0), 100); }); @@ -714,69 +663,48 @@ fn set_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown asset assert_noop!( - Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12), + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12), Error::::Unknown, ); - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); // Cannot add metadata to unowned asset assert_noop!( - Assets::set_metadata(RuntimeOrigin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12), + Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12), Error::::NoPermission, ); // Cannot add oversized metadata assert_noop!( - Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12), + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12), Error::::BadMetadata, ); assert_noop!( - Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12), + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12), Error::::BadMetadata, ); // Successfully add metadata and take deposit Balances::make_free_balance_be(&1, 30); - assert_ok!(Assets::set_metadata( - RuntimeOrigin::signed(1), - 0, - vec![0u8; 10], - vec![0u8; 10], - 12 - )); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12)); assert_eq!(Balances::free_balance(&1), 9); // Update deposit - assert_ok!(Assets::set_metadata( - RuntimeOrigin::signed(1), - 0, - vec![0u8; 10], - vec![0u8; 5], - 12 - )); + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 5], 12)); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Assets::set_metadata( - RuntimeOrigin::signed(1), - 0, - vec![0u8; 10], - vec![0u8; 15], - 12 - )); + 
assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 15], 12)); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12), + Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12), BalancesError::::InsufficientBalance, ); // Clear Metadata assert!(Metadata::::contains_key(0)); - assert_noop!( - Assets::clear_metadata(RuntimeOrigin::signed(2), 0), - Error::::NoPermission - ); - assert_noop!(Assets::clear_metadata(RuntimeOrigin::signed(1), 1), Error::::Unknown); - assert_ok!(Assets::clear_metadata(RuntimeOrigin::signed(1), 0)); + assert_noop!(Assets::clear_metadata(Origin::signed(2), 0), Error::::NoPermission); + assert_noop!(Assets::clear_metadata(Origin::signed(1), 1), Error::::Unknown); + assert_ok!(Assets::clear_metadata(Origin::signed(1), 0)); assert!(!Metadata::::contains_key(0)); }); } @@ -785,13 +713,13 @@ fn set_metadata_should_work() { #[test] fn destroy_calls_died_hooks() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 50)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 50)); // Create account 1 and 2. - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); // Destroy the asset. let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); // Asset is gone and accounts 1 and 2 died. 
assert!(Asset::::get(0).is_none()); @@ -802,39 +730,36 @@ fn destroy_calls_died_hooks() { #[test] fn freezer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 10)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); // freeze 50 of it. set_frozen_balance(0, 1, 50); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 20)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 20)); // cannot transfer another 21 away as this would take the non-frozen balance (30) to below // the minimum balance (10). - assert_noop!( - Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 21), - Error::::BalanceLow - ); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 21), Error::::BalanceLow); // create an approved transfer... Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); let e = Error::::BalanceLow; // ...but that wont work either: - assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 2, 21), e); + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 2, 21), e); // a force transfer won't work also. let e = Error::::BalanceLow; - assert_noop!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 1, 2, 21), e); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21), e); // reduce it to only 49 frozen... set_frozen_balance(0, 1, 49); // ...and it's all good: - assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 1, 2, 21)); + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21)); // and if we clear it, we can remove the account completely. 
clear_frozen_balance(0, 1); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(hooks(), vec![Hook::Died(0, 1)]); }); } @@ -844,7 +769,7 @@ fn imbalances_should_work() { use frame_support::traits::tokens::fungibles::Balanced; new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); let imb = Assets::issue(0, 100); assert_eq!(Assets::total_supply(0), 100); @@ -867,9 +792,9 @@ fn imbalances_should_work() { fn force_metadata_should_work() { new_test_ext().execute_with(|| { // force set metadata works - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::force_set_metadata( - RuntimeOrigin::root(), + Origin::root(), 0, vec![0u8; 10], vec![0u8; 10], @@ -881,7 +806,7 @@ fn force_metadata_should_work() { // overwrites existing metadata let asset_original_metadata = Metadata::::get(0); assert_ok!(Assets::force_set_metadata( - RuntimeOrigin::root(), + Origin::root(), 0, vec![1u8; 10], vec![1u8; 10], @@ -892,14 +817,7 @@ fn force_metadata_should_work() { // attempt to set metadata for non-existent asset class assert_noop!( - Assets::force_set_metadata( - RuntimeOrigin::root(), - 1, - vec![0u8; 10], - vec![0u8; 10], - 8, - false - ), + Assets::force_set_metadata(Origin::root(), 1, vec![0u8; 10], vec![0u8; 10], 8, false), Error::::Unknown ); @@ -907,7 +825,7 @@ fn force_metadata_should_work() { let limit = 50usize; assert_noop!( Assets::force_set_metadata( - RuntimeOrigin::root(), + Origin::root(), 0, vec![0u8; limit + 1], vec![0u8; 10], @@ -918,7 +836,7 @@ fn force_metadata_should_work() { ); assert_noop!( Assets::force_set_metadata( - RuntimeOrigin::root(), + Origin::root(), 0, vec![0u8; 10], vec![0u8; limit + 1], @@ -930,14 +848,11 @@ fn force_metadata_should_work() { 
// force clear metadata works assert!(Metadata::::contains_key(0)); - assert_ok!(Assets::force_clear_metadata(RuntimeOrigin::root(), 0)); + assert_ok!(Assets::force_clear_metadata(Origin::root(), 0)); assert!(!Metadata::::contains_key(0)); // Error handles clearing non-existent asset class - assert_noop!( - Assets::force_clear_metadata(RuntimeOrigin::root(), 1), - Error::::Unknown - ); + assert_noop!(Assets::force_clear_metadata(Origin::root(), 1), Error::::Unknown); }); } @@ -946,57 +861,34 @@ fn force_asset_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 10); Balances::make_free_balance_be(&2, 10); - assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 30)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 50)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 150)); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 30)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 50)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 150)); // force asset status to change min_balance > balance - assert_ok!(Assets::force_asset_status( - RuntimeOrigin::root(), - 0, - 1, - 1, - 1, - 1, - 100, - true, - false - )); + assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 100, true, false)); assert_eq!(Assets::balance(0, 1), 50); // account can recieve assets for balance < min_balance - assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 1)); + assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 1)); assert_eq!(Assets::balance(0, 1), 51); // account on outbound transfer will cleanup for balance < min_balance - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 1)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 1)); assert_eq!(Assets::balance(0, 1), 0); // won't create new account with balance below min_balance - assert_noop!( - Assets::transfer(RuntimeOrigin::signed(2), 0, 3, 50), - TokenError::BelowMinimum - ); + assert_noop!(Assets::transfer(Origin::signed(2), 0, 
3, 50), TokenError::BelowMinimum); // force asset status will not execute for non-existent class assert_noop!( - Assets::force_asset_status(RuntimeOrigin::root(), 1, 1, 1, 1, 1, 90, true, false), + Assets::force_asset_status(Origin::root(), 1, 1, 1, 1, 1, 90, true, false), Error::::Unknown ); // account drains to completion when funds dip below min_balance - assert_ok!(Assets::force_asset_status( - RuntimeOrigin::root(), - 0, - 1, - 1, - 1, - 1, - 110, - true, - false - )); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 110)); + assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 110, true, false)); + assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 110)); assert_eq!(Assets::balance(0, 1), 200); assert_eq!(Assets::balance(0, 2), 0); assert_eq!(Assets::total_supply(0), 200); @@ -1009,9 +901,9 @@ fn balance_conversion_should_work() { use frame_support::traits::tokens::BalanceConversion; let id = 42; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), id, 1, true, 10)); + assert_ok!(Assets::force_create(Origin::root(), id, 1, true, 10)); let not_sufficient = 23; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), not_sufficient, 1, false, 10)); + assert_ok!(Assets::force_create(Origin::root(), not_sufficient, 1, false, 10)); assert_eq!( BalanceToAssetBalance::::to_asset_balance(100, 1234), @@ -1046,9 +938,9 @@ fn assets_from_genesis_should_exist() { fn querying_name_symbol_and_decimals_should_work() { new_test_ext().execute_with(|| { use frame_support::traits::tokens::fungibles::metadata::Inspect; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::force_set_metadata( - RuntimeOrigin::root(), + Origin::root(), 0, vec![0u8; 10], vec![1u8; 10], @@ -1065,8 +957,8 @@ fn querying_name_symbol_and_decimals_should_work() { fn querying_allowance_should_work() { new_test_ext().execute_with(|| { use 
frame_support::traits::tokens::fungibles::approvals::{Inspect, Mutate}; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); assert_ok!(Assets::approve(0, &1, &2, 50)); assert_eq!(Assets::allowance(0, &1, &2), 50); @@ -1080,30 +972,8 @@ fn querying_allowance_should_work() { fn transfer_large_asset() { new_test_ext().execute_with(|| { let amount = u64::pow(2, 63) + 2; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, amount)); - assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, amount - 1)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, amount)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, amount - 1)); }) } - -#[test] -fn querying_roles_should_work() { - new_test_ext().execute_with(|| { - use frame_support::traits::tokens::fungibles::roles::Inspect; - assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); - assert_ok!(Assets::set_team( - RuntimeOrigin::signed(1), - 0, - // Issuer - 2, - // Admin - 3, - // Freezer - 4, - )); - assert_eq!(Assets::owner(0), Some(1)); - assert_eq!(Assets::issuer(0), Some(2)); - assert_eq!(Assets::admin(0), Some(3)); - assert_eq!(Assets::freezer(0), Some(4)); - }); -} diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index 677fc5847c614..2e8a1f911fb0f 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -165,7 +165,7 @@ pub trait FrozenBalance { /// /// Under normal behaviour, the account balance should not go below the sum of this (if `Some`) /// and the asset's minimum balance. However, the account balance may reasonably begin below - /// this sum (e.g. 
if less than the sum had ever been transferred into the account). + /// this sum (e.g. if less than the sum had ever been transfered into the account). /// /// In special cases (privileged intervention) the account balance may also go below the sum. /// diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index 3b29e55b306fe..e8f1184cf570f 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/assets/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/assets/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -77,212 +74,184 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - // Minimum execution time: 33_241 nanoseconds. 
- Weight::from_ref_time(33_873_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (27_167_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - // Minimum execution time: 19_883 nanoseconds. - Weight::from_ref_time(20_651_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (15_473_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) // Storage: System Account (r:5000 w:5000) // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) - /// The range of component `c` is `[0, 5000]`. - /// The range of component `s` is `[0, 5000]`. - /// The range of component `a` is `[0, 500]`. fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - // Minimum execution time: 76_222_544 nanoseconds. 
- Weight::from_ref_time(76_864_587_000 as u64) - // Standard Error: 127_086 - .saturating_add(Weight::from_ref_time(8_645_143 as u64).saturating_mul(c as u64)) - // Standard Error: 127_086 - .saturating_add(Weight::from_ref_time(11_281_301 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(c as u64))) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(a as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(c as u64))) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) + (0 as Weight) + // Standard Error: 37_000 + .saturating_add((17_145_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 37_000 + .saturating_add((19_333_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 375_000 + .saturating_add((17_046_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - // Minimum execution time: 36_782 nanoseconds. 
- Weight::from_ref_time(37_340_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (30_819_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - // Minimum execution time: 44_425 nanoseconds. - Weight::from_ref_time(45_485_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (35_212_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 58_294 nanoseconds. - Weight::from_ref_time(59_447_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (47_401_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - // Minimum execution time: 46_704 nanoseconds. - Weight::from_ref_time(47_521_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (42_300_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - // Minimum execution time: 57_647 nanoseconds. 
- Weight::from_ref_time(58_417_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (47_946_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - // Minimum execution time: 26_827 nanoseconds. - Weight::from_ref_time(27_373_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (21_670_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - // Minimum execution time: 26_291 nanoseconds. - Weight::from_ref_time(26_854_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (21_503_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - // Minimum execution time: 22_694 nanoseconds. - Weight::from_ref_time(23_613_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_158_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - // Minimum execution time: 22_572 nanoseconds. 
- Weight::from_ref_time(24_121_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_525_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - // Minimum execution time: 23_949 nanoseconds. - Weight::from_ref_time(24_347_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (19_858_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 23_102 nanoseconds. - Weight::from_ref_time(23_518_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_045_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, s: u32, ) -> Weight { - // Minimum execution time: 41_032 nanoseconds. 
- Weight::from_ref_time(42_845_624 as u64) - // Standard Error: 1_274 - .saturating_add(Weight::from_ref_time(1_875 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn set_metadata(n: u32, s: u32, ) -> Weight { + (32_395_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 42_570 nanoseconds. - Weight::from_ref_time(42_957_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (32_893_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Minimum execution time: 22_768 nanoseconds. 
- Weight::from_ref_time(23_868_816 as u64) - // Standard Error: 612 - .saturating_add(Weight::from_ref_time(1_602 as u64).saturating_mul(n as u64)) - // Standard Error: 612 - .saturating_add(Weight::from_ref_time(2_097 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + (19_586_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - // Minimum execution time: 41_863 nanoseconds. - Weight::from_ref_time(42_643_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (32_478_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - // Minimum execution time: 21_747 nanoseconds. - Weight::from_ref_time(22_595_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (17_143_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 45_602 nanoseconds. 
- Weight::from_ref_time(46_004_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (36_389_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - // Minimum execution time: 70_944 nanoseconds. - Weight::from_ref_time(71_722_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (61_854_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 46_316 nanoseconds. - Weight::from_ref_time(46_910_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (36_759_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - // Minimum execution time: 47_145 nanoseconds. - Weight::from_ref_time(47_611_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (37_753_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -290,211 +259,183 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - // Minimum execution time: 33_241 nanoseconds. 
- Weight::from_ref_time(33_873_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (27_167_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - // Minimum execution time: 19_883 nanoseconds. - Weight::from_ref_time(20_651_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (15_473_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) // Storage: System Account (r:5000 w:5000) // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) - /// The range of component `c` is `[0, 5000]`. - /// The range of component `s` is `[0, 5000]`. - /// The range of component `a` is `[0, 500]`. fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - // Minimum execution time: 76_222_544 nanoseconds. 
- Weight::from_ref_time(76_864_587_000 as u64) - // Standard Error: 127_086 - .saturating_add(Weight::from_ref_time(8_645_143 as u64).saturating_mul(c as u64)) - // Standard Error: 127_086 - .saturating_add(Weight::from_ref_time(11_281_301 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(c as u64))) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(a as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(c as u64))) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) + (0 as Weight) + // Standard Error: 37_000 + .saturating_add((17_145_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 37_000 + .saturating_add((19_333_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 375_000 + .saturating_add((17_046_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - // Minimum execution time: 36_782 nanoseconds. 
- Weight::from_ref_time(37_340_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (30_819_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - // Minimum execution time: 44_425 nanoseconds. - Weight::from_ref_time(45_485_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (35_212_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 58_294 nanoseconds. - Weight::from_ref_time(59_447_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (47_401_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - // Minimum execution time: 46_704 nanoseconds. - Weight::from_ref_time(47_521_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (42_300_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - // Minimum execution time: 57_647 nanoseconds. 
- Weight::from_ref_time(58_417_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (47_946_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - // Minimum execution time: 26_827 nanoseconds. - Weight::from_ref_time(27_373_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (21_670_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - // Minimum execution time: 26_291 nanoseconds. - Weight::from_ref_time(26_854_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (21_503_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - // Minimum execution time: 22_694 nanoseconds. - Weight::from_ref_time(23_613_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_158_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - // Minimum execution time: 22_572 nanoseconds. 
- Weight::from_ref_time(24_121_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_525_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - // Minimum execution time: 23_949 nanoseconds. - Weight::from_ref_time(24_347_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (19_858_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 23_102 nanoseconds. - Weight::from_ref_time(23_518_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_045_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, s: u32, ) -> Weight { - // Minimum execution time: 41_032 nanoseconds. 
- Weight::from_ref_time(42_845_624 as u64) - // Standard Error: 1_274 - .saturating_add(Weight::from_ref_time(1_875 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn set_metadata(n: u32, s: u32, ) -> Weight { + (32_395_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 42_570 nanoseconds. - Weight::from_ref_time(42_957_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (32_893_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Minimum execution time: 22_768 nanoseconds. 
- Weight::from_ref_time(23_868_816 as u64) - // Standard Error: 612 - .saturating_add(Weight::from_ref_time(1_602 as u64).saturating_mul(n as u64)) - // Standard Error: 612 - .saturating_add(Weight::from_ref_time(2_097 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + (19_586_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - // Minimum execution time: 41_863 nanoseconds. - Weight::from_ref_time(42_643_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (32_478_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - // Minimum execution time: 21_747 nanoseconds. - Weight::from_ref_time(22_595_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (17_143_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 45_602 nanoseconds. 
- Weight::from_ref_time(46_004_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (36_389_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - // Minimum execution time: 70_944 nanoseconds. - Weight::from_ref_time(71_722_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (61_854_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 46_316 nanoseconds. - Weight::from_ref_time(46_910_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (36_759_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - // Minimum execution time: 47_145 nanoseconds. 
- Weight::from_ref_time(47_611_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (37_753_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 9c6056497118c..1ddf3888d3c96 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -166,7 +166,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Swap action. type SwapAction: SwapAction + Parameter + MaxEncodedLen; /// Limit of proof size. @@ -243,7 +243,7 @@ pub mod pallet { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] pub fn create_swap( origin: OriginFor, target: T::AccountId, @@ -280,10 +280,9 @@ pub mod pallet { /// the operation fails. This is used for weight calculation. #[pallet::weight( T::DbWeight::get().reads_writes(1, 1) - .saturating_add(action.weight()) - .ref_time() .saturating_add(40_000_000) - .saturating_add((proof.len() as u64).saturating_mul(100)) + .saturating_add((proof.len() as Weight).saturating_mul(100)) + .saturating_add(action.weight()) )] pub fn claim_swap( origin: OriginFor, @@ -318,7 +317,7 @@ pub mod pallet { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. 
- #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] pub fn cancel_swap( origin: OriginFor, target: T::AccountId, @@ -334,7 +333,7 @@ pub mod pallet { ); swap.action.cancel(&swap.source); - PendingSwaps::::remove(&target, hashed_proof); + PendingSwaps::::remove(&target, hashed_proof.clone()); Self::deposit_event(Event::SwapCancelled { account: target, proof: hashed_proof }); diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 5ad63dfce55b4..2352e7852d090 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -30,23 +30,23 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -65,14 +65,14 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SwapAction = BalanceSwapAction; type ProofLimit = 
ConstU32<1024>; } @@ -100,9 +100,9 @@ fn two_party_successful_swap() { // A creates the swap on chain1. chain1.execute_with(|| { AtomicSwap::create_swap( - RuntimeOrigin::signed(A), + Origin::signed(A), B, - hashed_proof, + hashed_proof.clone(), BalanceSwapAction::new(50), 1000, ) @@ -115,9 +115,9 @@ fn two_party_successful_swap() { // B creates the swap on chain2. chain2.execute_with(|| { AtomicSwap::create_swap( - RuntimeOrigin::signed(B), + Origin::signed(B), A, - hashed_proof, + hashed_proof.clone(), BalanceSwapAction::new(75), 1000, ) @@ -129,12 +129,8 @@ fn two_party_successful_swap() { // A reveals the proof and claims the swap on chain2. chain2.execute_with(|| { - AtomicSwap::claim_swap( - RuntimeOrigin::signed(A), - proof.to_vec(), - BalanceSwapAction::new(75), - ) - .unwrap(); + AtomicSwap::claim_swap(Origin::signed(A), proof.to_vec(), BalanceSwapAction::new(75)) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 + 75); assert_eq!(Balances::free_balance(B), 200 - 75); @@ -142,12 +138,8 @@ fn two_party_successful_swap() { // B use the revealed proof to claim the swap on chain1. chain1.execute_with(|| { - AtomicSwap::claim_swap( - RuntimeOrigin::signed(B), - proof.to_vec(), - BalanceSwapAction::new(50), - ) - .unwrap(); + AtomicSwap::claim_swap(Origin::signed(B), proof.to_vec(), BalanceSwapAction::new(50)) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 - 50); assert_eq!(Balances::free_balance(B), 200 + 50); diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 07fa9aa680e80..636a28692ba28 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -30,6 +30,7 @@ use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::IdentityLookup, }; +use sp_std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -48,7 +49,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -56,16 +57,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -85,17 +86,18 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } -parameter_types! { - static DisabledValidatorTestValue: Vec = Default::default(); +thread_local! 
{ + static DISABLED_VALIDATORS: RefCell> = RefCell::new(Default::default()); } pub struct MockDisabledValidators; impl MockDisabledValidators { pub fn disable_validator(index: AuthorityIndex) { - DisabledValidatorTestValue::mutate(|v| { - if let Err(i) = v.binary_search(&index) { - v.insert(i, index); + DISABLED_VALIDATORS.with(|v| { + let mut disabled = v.borrow_mut(); + if let Err(i) = disabled.binary_search(&index) { + disabled.insert(i, index); } }) } @@ -103,7 +105,7 @@ impl MockDisabledValidators { impl DisabledValidators for MockDisabledValidators { fn is_disabled(index: AuthorityIndex) -> bool { - DisabledValidatorTestValue::get().binary_search(&index).is_ok() + DISABLED_VALIDATORS.with(|v| v.borrow().binary_search(&index).is_ok()) } } diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index b642a9ac283f2..a56d8e785f6ac 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -209,7 +209,7 @@ mod tests { type Keys = UintAuthorityId; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = TestSessionHandler; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = AuthorityId; type ValidatorIdOf = ConvertInto; type NextSessionRotation = pallet_session::PeriodicSessions; @@ -227,7 +227,7 @@ mod tests { pub const Period: BlockNumber = 1; pub const Offset: BlockNumber = 0; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -235,16 +235,16 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = 
::sp_runtime::traits::BlakeTwo256; type AccountId = AuthorityId; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index c08e773abe3a7..8ddccfd9cf939 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -190,7 +190,7 @@ pub mod pallet { T::EventHandler::note_author(author); } - Weight::zero() + 0 } fn on_finalize(_: T::BlockNumber) { @@ -460,7 +460,7 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -468,16 +468,16 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -738,7 +738,7 @@ mod tests { System::reset_events(); System::initialize(&current_depth, &parent_hash, &Default::default()); Authorship::on_initialize(current_depth); - Authorship::set_uncles(RuntimeOrigin::none(), uncles).unwrap(); + Authorship::set_uncles(Origin::none(), uncles).unwrap(); Authorship::on_finalize(current_depth); max_item_count = std::cmp::max(max_item_count, ::Uncles::get().len()); diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 9f79a404724e0..dd76726df3017 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -43,7 +43,7 @@ sp-core = { version
= "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index 20002c97f8e56..ac7ab28b5164a 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -17,8 +17,6 @@ //! Benchmarks for the BABE Pallet. -#![cfg(feature = "runtime-benchmarks")] - use super::*; use frame_benchmarking::benchmarks; @@ -72,3 +70,30 @@ benchmarks! { crate::mock::Test, ) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::*; + + #[test] + fn test_generate_equivocation_report_blob() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + let offending_authority_index = 0; + let offending_authority_pair = &pairs[0]; + + ext.execute_with(|| { + start_era(1); + + let equivocation_proof = generate_equivocation_proof( + offending_authority_index, + offending_authority_pair, + CurrentSlot::::get() + 1, + ); + + println!("equivocation_proof: {:?}", equivocation_proof); + println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); + }); + } +} diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index d3e0c9d044883..57c74323b7932 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -38,17 +38,15 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - let ref_time_weight = (35u64 * WEIGHT_PER_MICROS) - .saturating_add((175u64 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + (35 * WEIGHT_PER_MICROS) + .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(110u64 * WEIGHT_PER_MICROS) + .saturating_add(110 * WEIGHT_PER_MICROS) // report offence - .saturating_add(110u64 * WEIGHT_PER_MICROS) - .saturating_add(25u64 * 
WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(110 * WEIGHT_PER_MICROS) + .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)); - - ref_time_weight + .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) } } diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index f55bda751887d..df46f3544b389 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -284,9 +284,9 @@ impl Offence self.slot } - fn slash_fraction(&self, offenders_count: u32) -> Perbill { + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index eadaa036332fa..1effc2c1989fa 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -23,13 +23,13 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::{DispatchResultWithPostInfo, Pays}, + dispatch::DispatchResultWithPostInfo, ensure, traits::{ ConstU32, DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, OneSessionHandler, }, - weights::Weight, + weights::{Pays, Weight}, BoundedVec, WeakBoundedVec, }; use sp_application_crypto::ByteArray; @@ -306,7 +306,6 @@ pub mod pallet { /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. 
#[pallet::storage] - #[pallet::getter(fn epoch_config)] pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; /// The configuration for the next epoch, `None` if the config will not change @@ -337,7 +336,7 @@ pub mod pallet { /// Initialization fn on_initialize(now: BlockNumberFor) -> Weight { Self::initialize(now); - Weight::zero() + 0 } /// Block finalization @@ -1009,6 +1008,6 @@ pub mod migrations { writes += 3; - T::DbWeight::get().reads_writes(reads, writes) + T::DbWeight::get().writes(writes) + T::DbWeight::get().reads(reads) } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 204de8aae172e..5677eb7e28e49 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -66,7 +66,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -74,17 +74,17 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = DummyValidatorId; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -98,10 +98,10 @@ impl frame_system::Config for Test { impl frame_system::offchain::SendTransactionTypes for Test where - RuntimeCall: From, + Call: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type OverarchingCall = Call; + type Extrinsic = TestXt; } impl_opaque_keys! 
{ @@ -111,7 +111,7 @@ impl_opaque_keys! { } impl pallet_session::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; @@ -147,7 +147,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); @@ -178,16 +178,13 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { type MaxNominations = ConstU32<16>; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type CurrencyBalance = ::Balance; type Slash = (); @@ -202,19 +199,17 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; - type ElectionProvider = onchain::OnChainExecution; + type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } impl pallet_offences::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type 
OnOffenceHandler = Staking; } @@ -434,7 +429,7 @@ pub fn generate_equivocation_proof( System::reset_events(); System::initialize(&current_block, &parent_hash, &pre_digest); System::set_block_number(current_block); - Timestamp::set_timestamp(*current_slot * Babe::slot_duration()); + Timestamp::set_timestamp(current_block); System::finalize() }; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index d4132e6378540..0859bb7a40849 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -20,8 +20,8 @@ use super::{Call, *}; use frame_support::{ assert_err, assert_noop, assert_ok, - dispatch::{GetDispatchInfo, Pays}, traits::{Currency, EstimateNextSessionRotation, OnFinalize}, + weights::{GetDispatchInfo, Pays}, }; use mock::*; use pallet_session::ShouldEndSession; @@ -289,7 +289,7 @@ fn can_enact_next_config() { assert_eq!(NextEpochConfig::::get(), Some(next_config.clone())); Babe::plan_config_change( - RuntimeOrigin::root(), + Origin::root(), NextConfigDescriptor::V1 { c: next_next_config.c, allowed_slots: next_next_config.allowed_slots, @@ -323,15 +323,15 @@ fn only_root_can_enact_config_change() { let next_config = NextConfigDescriptor::V1 { c: (1, 4), allowed_slots: AllowedSlots::PrimarySlots }; - let res = Babe::plan_config_change(RuntimeOrigin::none(), next_config.clone()); + let res = Babe::plan_config_change(Origin::none(), next_config.clone()); assert_noop!(res, DispatchError::BadOrigin); - let res = Babe::plan_config_change(RuntimeOrigin::signed(1), next_config.clone()); + let res = Babe::plan_config_change(Origin::signed(1), next_config.clone()); assert_noop!(res, DispatchError::BadOrigin); - let res = Babe::plan_config_change(RuntimeOrigin::root(), next_config); + let res = Babe::plan_config_change(Origin::root(), next_config); assert!(res.is_ok()); }); @@ -464,7 +464,7 @@ fn report_equivocation_current_session_works() { // report the equivocation Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(),
Box::new(equivocation_proof), key_owner_proof, ) @@ -536,7 +536,7 @@ fn report_equivocation_old_session_works() { // report the equivocation Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -588,7 +588,7 @@ fn report_equivocation_invalid_key_owner_proof() { key_owner_proof.session = 0; assert_err!( Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof.clone()), key_owner_proof ), @@ -608,7 +608,7 @@ fn report_equivocation_invalid_key_owner_proof() { assert_err!( Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ), @@ -642,7 +642,7 @@ fn report_equivocation_invalid_equivocation_proof() { let assert_invalid_equivocation = |equivocation_proof| { assert_err!( Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof.clone(), ), @@ -659,7 +659,7 @@ fn report_equivocation_invalid_equivocation_proof() { equivocation_proof.second_header = equivocation_proof.first_header.clone(); assert_invalid_equivocation(equivocation_proof); - // missing pre-runtime digest from one header + // missing preruntime digest from one header let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, @@ -784,7 +784,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { // we submit the report Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -823,7 +823,7 @@ fn report_equivocation_has_valid_weight() { .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) - .all(|w| w[0].ref_time() < w[1].ref_time())); + .all(|w| w[0] < w[1])); } #[test] @@ -852,13 +852,12 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero 
weight and the fee has to be paid. - // TODO: account for proof size weight - assert!(info.weight.ref_time() > 0); + assert!(info.weight > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. let post_info = Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof.clone()), key_owner_proof.clone(), ) @@ -872,7 +871,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation again which is invalid now since it is // duplicate. let post_info = Babe::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -927,24 +926,3 @@ fn add_epoch_configurations_migration_works() { assert_eq!(PendingEpochConfigChange::::get(), Some(next_config_descriptor)); }); } - -#[test] -fn generate_equivocation_report_blob() { - let (pairs, mut ext) = new_test_ext_with_pairs(3); - - let offending_authority_index = 0; - let offending_authority_pair = &pairs[0]; - - ext.execute_with(|| { - start_era(1); - - let equivocation_proof = generate_equivocation_proof( - offending_authority_index, - offending_authority_pair, - CurrentSlot::::get() + 1, - ); - - println!("equivocation_proof: {:?}", equivocation_proof); - println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); - }); -} diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index 19eb66ae624af..9590d3d3ec4a4 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -41,18 +41,12 @@ sp-core = { version = "6.0.0", path = "../../primitives/core"} sp-io = { version = "6.0.0", path = "../../primitives/io"} sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } -frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support", 
features = ["runtime-benchmarks"] } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } [features] default = ["std"] std = [ - "sp-tracing?/std", - "sp-io?/std", - "sp-core?/std", - "pallet-balances?/std", - "frame-benchmarking?/std", - "scale-info/std", "codec/std", "sp-runtime/std", "sp-std/std", @@ -65,7 +59,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "sp-core", "sp-io", - "pallet-balances/runtime-benchmarks", + "pallet-balances", "sp-tracing", "frame-election-provider-support/runtime-benchmarks", ] @@ -74,6 +68,5 @@ fuzz = [ "sp-io", "pallet-balances", "sp-tracing", - "frame-election-provider-support/fuzz", ] try-runtime = [ "frame-support/try-runtime" ] diff --git a/frame/bags-list/fuzzer/Cargo.toml b/frame/bags-list/fuzzer/Cargo.toml index 0f10077dcbce8..ec7d98255b019 100644 --- a/frame/bags-list/fuzzer/Cargo.toml +++ b/frame/bags-list/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ publish = false [dependencies] honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } -frame-election-provider-support = { version = "4.0.0-dev", features = ["fuzz"], path = "../../election-provider-support" } +frame-election-provider-support = { version = "4.0.0-dev", features = ["runtime-benchmarks"], path = "../../election-provider-support" } pallet-bags-list = { version = "4.0.0-dev", features = ["fuzz"], path = ".." } [[bin]] diff --git a/frame/bags-list/fuzzer/src/main.rs b/frame/bags-list/fuzzer/src/main.rs index 9f7ca464cc2b8..c17fbe0a2f77f 100644 --- a/frame/bags-list/fuzzer/src/main.rs +++ b/frame/bags-list/fuzzer/src/main.rs @@ -88,7 +88,7 @@ fn main() { }, } - assert!(BagsList::try_state().is_ok()); + assert!(BagsList::sanity_check().is_ok()); }) }); } diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs index fc25e3b65ddb1..458064cf79f57 100644 --- a/frame/bags-list/remote-tests/src/lib.rs +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -18,39 +18,30 @@ //! 
Utilities for remote-testing pallet-bags-list. use frame_election_provider_support::ScoreProvider; -use pallet_bags_list::Instance1; use sp_std::prelude::*; /// A common log target to use. pub const LOG_TARGET: &str = "runtime::bags-list::remote-tests"; pub mod migration; +pub mod sanity_check; pub mod snapshot; -pub mod try_state; /// A wrapper for a runtime that the functions of this crate expect. /// /// For example, this can be the `Runtime` type of the Polkadot runtime. -pub trait RuntimeT: - pallet_staking::Config + pallet_bags_list::Config + frame_system::Config -{ -} -impl< - I: 'static, - T: pallet_staking::Config + pallet_bags_list::Config + frame_system::Config, - > RuntimeT for T +pub trait RuntimeT: + pallet_staking::Config + pallet_bags_list::Config + frame_system::Config { } +impl RuntimeT for T {} fn percent(portion: u32, total: u32) -> f64 { (portion as f64 / total as f64) * 100f64 } /// Display the number of nodes in each bag, while identifying those that need a rebag. -pub fn display_and_check_bags>( - currency_unit: u64, - currency_name: &'static str, -) { +pub fn display_and_check_bags(currency_unit: u64, currency_name: &'static str) { use frame_election_provider_support::SortedListProvider; use frame_support::traits::Get; @@ -64,8 +55,7 @@ pub fn display_and_check_bags>( let mut seen_in_bags = 0; let mut rebaggable = 0; let mut active_bags = 0; - for vote_weight_thresh in >::BagThresholds::get() - { + for vote_weight_thresh in ::BagThresholds::get() { let vote_weight_thresh_u64: u64 = (*vote_weight_thresh) .try_into() .map_err(|_| "runtime must configure score to at most u64 to use this test") @@ -74,9 +64,7 @@ pub fn display_and_check_bags>( let vote_weight_thresh_as_unit = vote_weight_thresh_u64 as f64 / currency_unit as f64; let pretty_thresh = format!("Threshold: {}. 
{}", vote_weight_thresh_as_unit, currency_name); - let bag = match pallet_bags_list::Pallet::::list_bags_get( - *vote_weight_thresh, - ) { + let bag = match pallet_bags_list::Pallet::::list_bags_get(*vote_weight_thresh) { Some(bag) => bag, None => { log::info!(target: LOG_TARGET, "{} NO VOTERS.", pretty_thresh); @@ -87,8 +75,7 @@ pub fn display_and_check_bags>( active_bags += 1; for id in bag.std_iter().map(|node| node.std_id().clone()) { - let vote_weight = - >::ScoreProvider::score(&id); + let vote_weight = ::ScoreProvider::score(&id); let vote_weight_thresh_u64: u64 = (*vote_weight_thresh) .try_into() .map_err(|_| "runtime must configure score to at most u64 to use this test") @@ -105,8 +92,8 @@ pub fn display_and_check_bags>( ); } - let node = pallet_bags_list::Node::::get(&id) - .expect("node in bag must exist."); + let node = + pallet_bags_list::Node::::get(&id).expect("node in bag must exist."); if node.is_misplaced(vote_weight) { rebaggable += 1; let notional_bag = pallet_bags_list::notional_bag_for::(vote_weight); @@ -154,7 +141,7 @@ pub fn display_and_check_bags>( "a total of {} nodes are in {} active bags [{} total bags], {} of which can be rebagged.", voter_list_count, active_bags, - >::BagThresholds::get().len(), + ::BagThresholds::get().len(), rebaggable, ); } diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs index b013472b4c90e..c4cd73c45d377 100644 --- a/frame/bags-list/remote-tests/src/migration.rs +++ b/frame/bags-list/remote-tests/src/migration.rs @@ -24,15 +24,11 @@ use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Test voter bags migration. `currency_unit` is the number of planks per the the runtimes `UNITS` /// (i.e. 
number of decimal places per DOT, KSM etc) -pub async fn execute( +pub async fn execute( currency_unit: u64, currency_name: &'static str, ws_url: String, -) where - Runtime: RuntimeT, - Block: BlockT, - Block::Header: DeserializeOwned, -{ +) { let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), diff --git a/frame/bags-list/remote-tests/src/try_state.rs b/frame/bags-list/remote-tests/src/sanity_check.rs similarity index 83% rename from frame/bags-list/remote-tests/src/try_state.rs rename to frame/bags-list/remote-tests/src/sanity_check.rs index d3fb63f045a64..1027efb8539ee 100644 --- a/frame/bags-list/remote-tests/src/try_state.rs +++ b/frame/bags-list/remote-tests/src/sanity_check.rs @@ -25,20 +25,15 @@ use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute the sanity check of the bags-list. -pub async fn execute( +pub async fn execute( currency_unit: u64, currency_name: &'static str, ws_url: String, -) where - Runtime: crate::RuntimeT, - Block: BlockT, - Block::Header: DeserializeOwned, -{ +) { let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), - pallets: vec![pallet_bags_list::Pallet::::name() - .to_string()], + pallets: vec![pallet_bags_list::Pallet::::name().to_string()], ..Default::default() })) .inject_hashed_prefix(&>::prefix_hash()) @@ -49,7 +44,7 @@ pub async fn execute( ext.execute_with(|| { sp_core::crypto::set_default_ss58_version(Runtime::SS58Prefix::get().try_into().unwrap()); - pallet_bags_list::Pallet::::try_state().unwrap(); + pallet_bags_list::Pallet::::sanity_check().unwrap(); log::info!(target: crate::LOG_TARGET, "executed bags-list sanity check with no errors."); crate::display_and_check_bags::(currency_unit, currency_name); diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs index cfe065924bd92..408f5f2bd8aa2 100644 
--- a/frame/bags-list/remote-tests/src/snapshot.rs +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -22,12 +22,11 @@ use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute create a snapshot from pallet-staking. -pub async fn execute(voter_limit: Option, currency_unit: u64, ws_url: String) -where - Runtime: crate::RuntimeT, - Block: BlockT, - Block::Header: DeserializeOwned, -{ +pub async fn execute( + voter_limit: Option, + currency_unit: u64, + ws_url: String, +) { use frame_support::storage::generator::StorageMap; let mut ext = Builder::::new() @@ -35,8 +34,7 @@ where transport: ws_url.to_string().into(), // NOTE: we don't scrape pallet-staking, this kinda ensures that the source of the data // is bags-list. - pallets: vec![pallet_bags_list::Pallet::::name() - .to_string()], + pallets: vec![pallet_bags_list::Pallet::::name().to_string()], at: None, ..Default::default() })) diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs index 1f66697cb6765..dba0c9ee1e623 100644 --- a/frame/bags-list/src/benchmarks.rs +++ b/frame/bags-list/src/benchmarks.rs @@ -25,7 +25,7 @@ use frame_support::{assert_ok, traits::Get}; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::One; -frame_benchmarking::benchmarks_instance_pallet! { +frame_benchmarking::benchmarks! { rebag_non_terminal { // An expensive case for rebag-ing (rebag a non-terminal node): // @@ -57,8 +57,6 @@ frame_benchmarking::benchmarks_instance_pallet! { let dest_head: T::AccountId = account("dest_head", 0, 0); assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); - let origin_middle_lookup = T::Lookup::unlookup(origin_middle.clone()); - // the bags are in the expected state after initial setup. assert_eq!( List::::get_bags(), @@ -71,7 +69,7 @@ frame_benchmarking::benchmarks_instance_pallet! 
{ let caller = whitelisted_caller(); // update the weight of `origin_middle` to guarantee it will be rebagged into the destination. T::ScoreProvider::set_score_of(&origin_middle, dest_bag_thresh); - }: rebag(SystemOrigin::Signed(caller), origin_middle_lookup.clone()) + }: rebag(SystemOrigin::Signed(caller), origin_middle.clone()) verify { // check the bags have updated as expected. assert_eq!( @@ -99,7 +97,7 @@ frame_benchmarking::benchmarks_instance_pallet! { // clear any pre-existing storage. // NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); // define our origin and destination thresholds. let origin_bag_thresh = T::BagThresholds::get()[0]; @@ -116,8 +114,6 @@ frame_benchmarking::benchmarks_instance_pallet! { let dest_head: T::AccountId = account("dest_head", 0, 0); assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); - let origin_tail_lookup = T::Lookup::unlookup(origin_tail.clone()); - // the bags are in the expected state after initial setup. assert_eq!( List::::get_bags(), @@ -130,7 +126,7 @@ frame_benchmarking::benchmarks_instance_pallet! { let caller = whitelisted_caller(); // update the weight of `origin_tail` to guarantee it will be rebagged into the destination. T::ScoreProvider::set_score_of(&origin_tail, dest_bag_thresh); - }: rebag(SystemOrigin::Signed(caller), origin_tail_lookup.clone()) + }: rebag(SystemOrigin::Signed(caller), origin_tail.clone()) verify { // check the bags have updated as expected. assert_eq!( @@ -150,7 +146,7 @@ frame_benchmarking::benchmarks_instance_pallet! { // clear any pre-existing storage. // NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); let bag_thresh = T::BagThresholds::get()[0]; @@ -170,15 +166,13 @@ frame_benchmarking::benchmarks_instance_pallet! 
{ T::ScoreProvider::set_score_of(&lighter, bag_thresh - One::one()); T::ScoreProvider::set_score_of(&heavier, bag_thresh); - let lighter_lookup = T::Lookup::unlookup(lighter.clone()); - assert_eq!( List::::iter().map(|n| n.id().clone()).collect::>(), vec![lighter.clone(), heavier_prev.clone(), heavier.clone(), heavier_next.clone()] ); whitelist_account!(heavier); - }: _(SystemOrigin::Signed(heavier.clone()), lighter_lookup.clone()) + }: _(SystemOrigin::Signed(heavier.clone()), lighter.clone()) verify { assert_eq!( List::::iter().map(|n| n.id().clone()).collect::>(), diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 2b48fbf0ca630..7eee8fdfa23d8 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -56,7 +56,7 @@ use codec::FullCodec; use frame_election_provider_support::{ScoreProvider, SortedListProvider}; use frame_system::ensure_signed; -use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded, StaticLookup}; +use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded}; use sp_std::prelude::*; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -90,8 +90,6 @@ macro_rules! log { }; } -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -105,8 +103,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: weights::WeightInfo; @@ -225,9 +222,8 @@ pub mod pallet { /// /// If `dislocated` does not exists, it returns an error. 
#[pallet::weight(T::WeightInfo::rebag_non_terminal().max(T::WeightInfo::rebag_terminal()))] - pub fn rebag(origin: OriginFor, dislocated: AccountIdLookupOf) -> DispatchResult { + pub fn rebag(origin: OriginFor, dislocated: T::AccountId) -> DispatchResult { ensure_signed(origin)?; - let dislocated = T::Lookup::lookup(dislocated)?; let current_score = T::ScoreProvider::score(&dislocated); let _ = Pallet::::do_rebag(&dislocated, current_score) .map_err::, _>(Into::into)?; @@ -243,12 +239,8 @@ pub mod pallet { /// - both nodes are within the same bag, /// - and `origin` has a greater `Score` than `lighter`. #[pallet::weight(T::WeightInfo::put_in_front_of())] - pub fn put_in_front_of( - origin: OriginFor, - lighter: AccountIdLookupOf, - ) -> DispatchResult { + pub fn put_in_front_of(origin: OriginFor, lighter: T::AccountId) -> DispatchResult { let heavier = ensure_signed(origin)?; - let lighter = T::Lookup::lookup(lighter)?; List::::put_in_front_of(&lighter, &heavier) .map_err::, _>(Into::into) .map_err::(Into::into) @@ -264,11 +256,6 @@ pub mod pallet { "thresholds must strictly increase, and have no duplicates", ); } - - #[cfg(feature = "try-runtime")] - fn try_state(_: BlockNumberFor) -> Result<(), &'static str> { - >::try_state() - } } } @@ -346,8 +333,14 @@ impl, I: 'static> SortedListProvider for Pallet List::::unsafe_regenerate(all, score_of) } - fn try_state() -> Result<(), &'static str> { - List::::try_state() + #[cfg(feature = "std")] + fn sanity_check() -> Result<(), &'static str> { + List::::sanity_check() + } + + #[cfg(not(feature = "std"))] + fn sanity_check() -> Result<(), &'static str> { + Ok(()) } fn unsafe_clear() { @@ -387,7 +380,7 @@ impl, I: 'static> ScoreProvider for Pallet { Node::::get(id).map(|node| node.score()).unwrap_or_default() } - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] + #[cfg(any(feature = "runtime-benchmarks", test))] fn set_score_of(id: &T::AccountId, new_score: T::Score) { ListNodes::::mutate(id, 
|maybe_node| { if let Some(node) = maybe_node.as_mut() { diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index 272526ad1a636..b4f852685842d 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -28,8 +28,8 @@ use crate::Config; use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::ScoreProvider; use frame_support::{ - defensive, ensure, - traits::{Defensive, DefensiveOption, Get}, + ensure, + traits::{Defensive, Get}, DefaultNoBound, PalletError, }; use scale_info::TypeInfo; @@ -220,8 +220,7 @@ impl, I: 'static> List { crate::ListBags::::remove(removed_bag); } - #[cfg(feature = "std")] - debug_assert_eq!(Self::try_state(), Ok(())); + debug_assert_eq!(Self::sanity_check(), Ok(())); num_affected } @@ -326,7 +325,8 @@ impl, I: 'static> List { crate::log!( debug, - "inserted {:?} with score {:?} into bag {:?}, new count is {}", + "inserted {:?} with score {:? + } into bag {:?}, new count is {}", id, score, bag_score, @@ -457,8 +457,11 @@ impl, I: 'static> List { // re-fetch `lighter_node` from storage since it may have been updated when `heavier_node` // was removed. - let lighter_node = - Node::::get(lighter_id).defensive_ok_or_else(|| ListError::NodeNotFound)?; + let lighter_node = Node::::get(lighter_id).ok_or_else(|| { + debug_assert!(false, "id that should exist cannot be found"); + crate::log!(warn, "id that should exist cannot be found"); + ListError::NodeNotFound + })?; // insert `heavier_node` directly in front of `lighter_node`. This will update both nodes // in storage and update the node counter. @@ -505,7 +508,7 @@ impl, I: 'static> List { node.put(); } - /// Check the internal state of the list. + /// Sanity check the list. /// /// This should be called from the call-site, whenever one of the mutating apis (e.g. `insert`) /// is being used, after all other staking data (such as counter) has been updated. 
It checks: @@ -514,7 +517,8 @@ impl, I: 'static> List { /// * length of this list is in sync with `ListNodes::count()`, /// * and sanity-checks all bags and nodes. This will cascade down all the checks and makes sure /// all bags and nodes are checked per *any* update to `List`. - pub(crate) fn try_state() -> Result<(), &'static str> { + #[cfg(feature = "std")] + pub(crate) fn sanity_check() -> Result<(), &'static str> { let mut seen_in_list = BTreeSet::new(); ensure!( Self::iter().map(|node| node.id).all(|id| seen_in_list.insert(id)), @@ -542,7 +546,7 @@ impl, I: 'static> List { thresholds.into_iter().filter_map(|t| Bag::::get(t)) }; - let _ = active_bags.clone().try_for_each(|b| b.try_state())?; + let _ = active_bags.clone().try_for_each(|b| b.sanity_check())?; let nodes_in_bags_count = active_bags.clone().fold(0u32, |acc, cur| acc + cur.iter().count() as u32); @@ -553,12 +557,17 @@ impl, I: 'static> List { // check that all nodes are sane. We check the `ListNodes` storage item directly in case we // have some "stale" nodes that are not in a bag. for (_id, node) in crate::ListNodes::::iter() { - node.try_state()? + node.sanity_check()? } Ok(()) } + #[cfg(not(feature = "std"))] + pub(crate) fn sanity_check() -> Result<(), &'static str> { + Ok(()) + } + /// Returns the nodes of all non-empty bags. For testing and benchmarks. #[cfg(any(feature = "std", feature = "runtime-benchmarks"))] #[allow(dead_code)] @@ -692,7 +701,8 @@ impl, I: 'static> Bag { if *tail == node.id { // this should never happen, but this check prevents one path to a worst case // infinite loop. - defensive!("system logic error: inserting a node who has the id of tail"); + debug_assert!(false, "system logic error: inserting a node who has the id of tail"); + crate::log!(warn, "system logic error: inserting a node who has the id of tail"); return }; } @@ -743,7 +753,7 @@ impl, I: 'static> Bag { } } - /// Check the internal state of the bag. + /// Sanity check this bag. 
/// /// Should be called by the call-site, after any mutating operation on a bag. The call site of /// this struct is always `List`. @@ -751,7 +761,8 @@ impl, I: 'static> Bag { /// * Ensures head has no prev. /// * Ensures tail has no next. /// * Ensures there are no loops, traversal from head to tail is correct. - fn try_state(&self) -> Result<(), &'static str> { + #[cfg(feature = "std")] + fn sanity_check(&self) -> Result<(), &'static str> { frame_support::ensure!( self.head() .map(|head| head.prev().is_none()) @@ -790,6 +801,7 @@ impl, I: 'static> Bag { } /// Check if the bag contains a node with `id`. + #[cfg(feature = "std")] fn contains(&self, id: &T::AccountId) -> bool { self.iter().any(|n| n.id() == id) } @@ -882,7 +894,7 @@ impl, I: 'static> Node { &self.id } - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] + #[cfg(any(feature = "runtime-benchmarks", test))] pub fn set_score(&mut self, s: T::Score) { self.score = s } @@ -894,7 +906,8 @@ impl, I: 'static> Node { self.bag_upper } - fn try_state(&self) -> Result<(), &'static str> { + #[cfg(feature = "std")] + fn sanity_check(&self) -> Result<(), &'static str> { let expected_bag = Bag::::get(self.bag_upper).ok_or("bag not found for node")?; let id = self.id(); diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs index 966ea1a74c71c..9bdd54289fd88 100644 --- a/frame/bags-list/src/list/tests.rs +++ b/frame/bags-list/src/list/tests.rs @@ -350,15 +350,15 @@ mod list { } #[test] - fn try_state_works() { + fn sanity_check_works() { ExtBuilder::default().build_and_execute_no_post_check(|| { - assert_ok!(List::::try_state()); + assert_ok!(List::::sanity_check()); }); // make sure there are no duplicates. 
ExtBuilder::default().build_and_execute_no_post_check(|| { Bag::::get(10).unwrap().insert_unchecked(2, 10); - assert_eq!(List::::try_state(), Err("duplicate identified")); + assert_eq!(List::::sanity_check(), Err("duplicate identified")); }); // ensure count is in sync with `ListNodes::count()`. @@ -372,7 +372,7 @@ mod list { CounterForListNodes::::mutate(|counter| *counter += 1); assert_eq!(crate::ListNodes::::count(), 5); - assert_eq!(List::::try_state(), Err("iter_count != stored_count")); + assert_eq!(List::::sanity_check(), Err("iter_count != stored_count")); }); } @@ -804,7 +804,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 13, 14]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.sanity_check()); // and the node isn't mutated when its removed assert_eq!(node_4, node_4_pre_remove); @@ -814,7 +814,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3, 13, 14]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.sanity_check()); // when removing a tail that is not pointing at the head let node_14 = Node::::get(&14).unwrap(); @@ -822,7 +822,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3, 13]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.sanity_check()); // when removing a tail that is pointing at the head let node_13 = Node::::get(&13).unwrap(); @@ -830,7 +830,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.sanity_check()); // when removing a node that is both the head & tail let node_3 = Node::::get(&3).unwrap(); @@ -846,7 +846,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_10), vec![1, 12]); - assert_ok!(bag_10.try_state()); + assert_ok!(bag_10.sanity_check()); // when removing a head that is pointing at the tail let node_1 = Node::::get(&1).unwrap(); @@ -854,7 +854,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_10), vec![12]); - assert_ok!(bag_10.try_state()); + 
assert_ok!(bag_10.sanity_check()); // and since we updated the bag's head/tail, we need to write this storage so we // can correctly `get` it again in later checks bag_10.put(); @@ -865,7 +865,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 18, 19]); - assert_ok!(bag_2000.try_state()); + assert_ok!(bag_2000.sanity_check()); // when removing a node that is pointing at tail, but not head let node_18 = Node::::get(&18).unwrap(); @@ -873,7 +873,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 19]); - assert_ok!(bag_2000.try_state()); + assert_ok!(bag_2000.sanity_check()); // finally, when reading from storage, the state of all bags is as expected assert_eq!( @@ -905,7 +905,7 @@ mod bags { // .. and the bag it was removed from let bag_1000 = Bag::::get(1_000).unwrap(); // is sane - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.sanity_check()); // and has the correct head and tail. assert_eq!(bag_1000.head, Some(3)); assert_eq!(bag_1000.tail, Some(4)); diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index e1dc9f777e537..a77beb23bd667 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -21,11 +21,10 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_election_provider_support::ScoreProvider; use frame_support::traits::OnRuntimeUpgrade; +use sp_runtime::traits::Zero; #[cfg(feature = "try-runtime")] use frame_support::ensure; -#[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; /// A struct that does not migration, but only checks that the counter prefix exists and is correct. pub struct CheckCounterPrefix, I: 'static>(sp_std::marker::PhantomData<(T, I)>); @@ -35,7 +34,7 @@ impl, I: 'static> OnRuntimeUpgrade for CheckCounterPrefix Result, &'static str> { + fn pre_upgrade() -> Result<(), &'static str> { // The old explicit storage item. 
#[frame_support::storage_alias] type CounterForListNodes, I: 'static> = @@ -53,7 +52,7 @@ impl, I: 'static> OnRuntimeUpgrade for CheckCounterPrefix::count() ); - Ok(Vec::new()) + Ok(()) } } @@ -82,13 +81,17 @@ mod old { #[frame_support::storage_alias] pub type CounterForListNodes, I: 'static> = StorageValue, u32, ValueQuery>; + + #[frame_support::storage_alias] + pub type TempStorage, I: 'static> = + StorageValue, u32, ValueQuery>; } /// A struct that migrates all bags lists to contain a score value. pub struct AddScore, I: 'static = ()>(sp_std::marker::PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for AddScore { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result<(), &'static str> { // The list node data should be corrupt at this point, so this is zero. ensure!(crate::ListNodes::::iter().count() == 0, "list node data is not corrupt"); // We can use the helper `old::ListNode` to get the existing data. @@ -96,7 +99,8 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { let tracked_node_count: u32 = old::CounterForListNodes::::get(); crate::log!(info, "number of nodes before: {:?} {:?}", iter_node_count, tracked_node_count); ensure!(iter_node_count == tracked_node_count, "Node count is wrong."); - Ok(iter_node_count.encode()) + old::TempStorage::::put(iter_node_count); + Ok(()) } fn on_runtime_upgrade() -> frame_support::weights::Weight { @@ -119,10 +123,9 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { } #[cfg(feature = "try-runtime")] - fn post_upgrade(node_count_before: Vec) -> Result<(), &'static str> { - let node_count_before: u32 = Decode::decode(&mut node_count_before.as_slice()) - .expect("the state parameter should be something that was generated by pre_upgrade"); - // Now the list node data is not corrupt anymore. + fn post_upgrade() -> Result<(), &'static str> { + let node_count_before = old::TempStorage::::take(); + // Now, the list node data is not corrupt anymore. 
let iter_node_count_after: u32 = crate::ListNodes::::iter().count() as u32; let tracked_node_count_after: u32 = crate::ListNodes::::count(); crate::log!( diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index 8cc96a988e72a..961bf2b83552f 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -40,7 +40,7 @@ impl frame_election_provider_support::ScoreProvider for StakingMock { *NextVoteWeightMap::get().get(id).unwrap_or(&NextVoteWeight::get()) } - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] + #[cfg(any(feature = "runtime-benchmarks", test))] fn set_score_of(id: &AccountId, weight: Self::Score) { NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().insert(*id, weight)); } @@ -49,16 +49,16 @@ impl frame_election_provider_support::ScoreProvider for StakingMock { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -78,7 +78,7 @@ parameter_types! { } impl bags_list::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); type BagThresholds = BagThresholds; type ScoreProvider = StakingMock; @@ -108,7 +108,6 @@ pub struct ExtBuilder { skip_genesis_ids: bool, } -#[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] impl ExtBuilder { /// Skip adding the default genesis ids to the list. 
#[cfg(test)] @@ -148,7 +147,7 @@ impl ExtBuilder { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { self.build().execute_with(|| { test(); - List::::try_state().expect("Try-state post condition failed") + List::::sanity_check().expect("Sanity check post condition failed") }) } diff --git a/frame/bags-list/src/tests.rs b/frame/bags-list/src/tests.rs index 63a395ed0d65a..01c1642f882c1 100644 --- a/frame/bags-list/src/tests.rs +++ b/frame/bags-list/src/tests.rs @@ -37,7 +37,7 @@ mod pallet { // when increasing score to the level of non-existent bag assert_eq!(List::::get_score(&42).unwrap(), 20); StakingMock::set_score_of(&42, 2_000); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); assert_eq!(List::::get_score(&42).unwrap(), 2_000); // then a new bag is created and the id moves into it @@ -48,7 +48,7 @@ mod pallet { // when decreasing score within the range of the current bag StakingMock::set_score_of(&42, 1_001); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); // then the id does not move assert_eq!( @@ -60,7 +60,7 @@ mod pallet { // when reducing score to the level of a non-existent bag StakingMock::set_score_of(&42, 30); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); // then a new bag is created and the id moves into it assert_eq!( @@ -71,7 +71,7 @@ mod pallet { // when increasing score to the level of a pre-existing bag StakingMock::set_score_of(&42, 500); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); // then the id moves into that bag assert_eq!( @@ -92,7 +92,7 @@ mod pallet { // when StakingMock::set_score_of(&4, 10); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 4)); + assert_ok!(BagsList::rebag(Origin::signed(0), 4)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 
4]), (1_000, vec![2, 3])]); @@ -100,7 +100,7 @@ mod pallet { // when StakingMock::set_score_of(&3, 10); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 3)); + assert_ok!(BagsList::rebag(Origin::signed(0), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 4, 3]), (1_000, vec![2])]); @@ -111,7 +111,7 @@ mod pallet { // when StakingMock::set_score_of(&2, 10); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 2)); + assert_ok!(BagsList::rebag(Origin::signed(0), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 4, 3, 2])]); @@ -126,7 +126,7 @@ mod pallet { ExtBuilder::default().build_and_execute(|| { // when StakingMock::set_score_of(&2, 10); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 2)); + assert_ok!(BagsList::rebag(Origin::signed(0), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 2]), (1_000, vec![3, 4])]); @@ -134,7 +134,7 @@ mod pallet { // when StakingMock::set_score_of(&3, 10); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 3)); + assert_ok!(BagsList::rebag(Origin::signed(0), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 2, 3]), (1_000, vec![4])]); @@ -142,7 +142,7 @@ mod pallet { // when StakingMock::set_score_of(&4, 10); - assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 4)); + assert_ok!(BagsList::rebag(Origin::signed(0), 4)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 2, 3, 4])]); @@ -159,15 +159,12 @@ mod pallet { assert!(!node_3.is_misplaced(500)); // then calling rebag on account 3 with score 500 is a noop - assert_storage_noop!(assert_eq!(BagsList::rebag(RuntimeOrigin::signed(0), 3), Ok(()))); + assert_storage_noop!(assert_eq!(BagsList::rebag(Origin::signed(0), 3), Ok(()))); // when account 42 is not in the list assert!(!BagsList::contains(&42)); // then rebag-ing account 42 is an error - assert_storage_noop!(assert!(matches!( - BagsList::rebag(RuntimeOrigin::signed(0), 42), - Err(_) - ))); + 
assert_storage_noop!(assert!(matches!(BagsList::rebag(Origin::signed(0), 42), Err(_)))); }); } @@ -203,7 +200,7 @@ mod pallet { ); // any rebag is noop. - assert_storage_noop!(assert_eq!(BagsList::rebag(RuntimeOrigin::signed(0), 1), Ok(()))); + assert_storage_noop!(assert_eq!(BagsList::rebag(Origin::signed(0), 1), Ok(()))); }) } @@ -217,7 +214,7 @@ mod pallet { assert_eq!(List::::get_bags(), vec![(20, vec![10, 11])]); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(11), 10)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(11), 10)); // then assert_eq!(List::::get_bags(), vec![(20, vec![11, 10])]); @@ -234,7 +231,7 @@ mod pallet { assert_eq!(List::::get_bags(), vec![(20, vec![11, 10])]); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(11), 10)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(11), 10)); // then assert_eq!(List::::get_bags(), vec![(20, vec![11, 10])]); @@ -250,7 +247,7 @@ mod pallet { StakingMock::set_score_of(&3, 999); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(4), 3)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(4), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4, 3, 5])]); @@ -271,7 +268,7 @@ mod pallet { StakingMock::set_score_of(&5, 999); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(3), 5)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(3), 5)); // then assert_eq!( @@ -290,7 +287,7 @@ mod pallet { StakingMock::set_score_of(&2, 999); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(3), 2)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(3), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4])]); @@ -306,7 +303,7 @@ mod pallet { StakingMock::set_score_of(&3, 999); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(4), 3)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(4), 3)); // then 
assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4, 3])]); @@ -322,7 +319,7 @@ mod pallet { StakingMock::set_score_of(&2, 999); // when - assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(5), 2)); + assert_ok!(BagsList::put_in_front_of(Origin::signed(5), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![5, 2, 3, 4])]); @@ -338,7 +335,7 @@ mod pallet { StakingMock::set_score_of(&4, 999); // when - BagsList::put_in_front_of(RuntimeOrigin::signed(2), 4).unwrap(); + BagsList::put_in_front_of(Origin::signed(2), 4).unwrap(); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4, 5])]); @@ -352,7 +349,7 @@ mod pallet { assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4, 5])]); // when - BagsList::put_in_front_of(RuntimeOrigin::signed(3), 5).unwrap(); + BagsList::put_in_front_of(Origin::signed(3), 5).unwrap(); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4, 3, 5])]); @@ -368,7 +365,7 @@ mod pallet { StakingMock::set_score_of(&4, 999); // when - BagsList::put_in_front_of(RuntimeOrigin::signed(2), 4).unwrap(); + BagsList::put_in_front_of(Origin::signed(2), 4).unwrap(); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4])]); @@ -385,7 +382,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(RuntimeOrigin::signed(3), 2), + BagsList::put_in_front_of(Origin::signed(3), 2), crate::pallet::Error::::List(ListError::NotHeavier) ); }); @@ -399,7 +396,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(RuntimeOrigin::signed(3), 4), + BagsList::put_in_front_of(Origin::signed(3), 4), crate::pallet::Error::::List(ListError::NotHeavier) ); }); @@ -416,7 +413,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(RuntimeOrigin::signed(5), 4), + BagsList::put_in_front_of(Origin::signed(5), 4), crate::pallet::Error::::List(ListError::NodeNotFound) ); }); @@ -430,7 +427,7 @@ mod pallet { // 
then assert_noop!( - BagsList::put_in_front_of(RuntimeOrigin::signed(4), 5), + BagsList::put_in_front_of(Origin::signed(4), 5), crate::pallet::Error::::List(ListError::NodeNotFound) ); }); @@ -444,7 +441,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(RuntimeOrigin::signed(4), 1), + BagsList::put_in_front_of(Origin::signed(4), 1), crate::pallet::Error::::List(ListError::NotInSameBag) ); }); diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index a298dc3172f79..a554a9bd4ad1a 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_bags_list //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/bags-list/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/bags-list/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,34 +54,31 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: VoterList ListNodes (r:4 w:4) - // Storage: VoterList ListBags (r:1 w:1) + // Storage: BagsList ListNodes (r:4 w:4) + // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - // Minimum execution time: 73_553 nanoseconds. - Weight::from_ref_time(74_366_000 as u64) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (55_040_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - // Minimum execution time: 72_700 nanoseconds. 
- Weight::from_ref_time(73_322_000 as u64) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (53_671_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } - // Storage: VoterList ListNodes (r:4 w:4) + // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:2 w:0) - // Storage: VoterList CounterForListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - // Minimum execution time: 71_691 nanoseconds. - Weight::from_ref_time(72_798_000 as u64) - .saturating_add(T::DbWeight::get().reads(10 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (56_410_000 as Weight) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } } @@ -92,33 +86,30 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: VoterList ListNodes (r:4 w:4) - // Storage: VoterList ListBags (r:1 w:1) + // Storage: BagsList ListNodes (r:4 w:4) + // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - // Minimum execution time: 73_553 nanoseconds. 
- Weight::from_ref_time(74_366_000 as u64) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (55_040_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - // Minimum execution time: 72_700 nanoseconds. - Weight::from_ref_time(73_322_000 as u64) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (53_671_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } - // Storage: VoterList ListNodes (r:4 w:4) + // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:2 w:0) - // Storage: VoterList CounterForListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - // Minimum execution time: 71_691 nanoseconds. 
- Weight::from_ref_time(72_798_000 as u64) - .saturating_add(RocksDbWeight::get().reads(10 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (56_410_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } } diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index fd2312993b7e7..10150f0895906 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -31,7 +31,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 206adba0f044b..4a874e4ffa1d5 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -46,7 +46,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, // and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { @@ -60,7 +60,7 @@ benchmarks_instance_pallet! { transfer_best_case { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds for transfer (their account will never reasonably be killed). let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); @@ -80,7 +80,7 @@ benchmarks_instance_pallet! 
{ transfer_keep_alive { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds, thus a transfer will not kill account. let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); @@ -95,7 +95,7 @@ benchmarks_instance_pallet! { // Benchmark `set_balance` coming from ROOT account. This always creates an account. set_balance_creating { let user: T::AccountId = account("user", 0, SEED); - let user_lookup = T::Lookup::unlookup(user.clone()); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); @@ -110,7 +110,7 @@ benchmarks_instance_pallet! { // Benchmark `set_balance` coming from ROOT account. This always kills an account. set_balance_killing { let user: T::AccountId = account("user", 0, SEED); - let user_lookup = T::Lookup::unlookup(user.clone()); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); @@ -127,7 +127,7 @@ benchmarks_instance_pallet! { force_transfer { let existential_deposit = T::ExistentialDeposit::get(); let source: T::AccountId = account("source", 0, SEED); - let source_lookup = T::Lookup::unlookup(source.clone()); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); @@ -135,7 +135,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. 
let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: force_transfer(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount) verify { @@ -160,7 +160,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, // and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); // Create a bunch of users in storage. @@ -182,7 +182,7 @@ benchmarks_instance_pallet! { transfer_all { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give some multiple of the existential deposit let existential_deposit = T::ExistentialDeposit::get(); @@ -196,7 +196,7 @@ benchmarks_instance_pallet! 
{ force_unreserve { let user: T::AccountId = account("user", 0, SEED); - let user_lookup = T::Lookup::unlookup(user.clone()); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); // Give some multiple of the existential deposit let existential_deposit = T::ExistentialDeposit::get(); diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index d3085152eba6c..683ebce2b1693 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -185,15 +185,13 @@ use sp_runtime::{ AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Saturating, StaticLookup, Zero, }, - ArithmeticError, DispatchError, FixedPointOperand, RuntimeDebug, + ArithmeticError, DispatchError, RuntimeDebug, }; use sp_std::{cmp, fmt::Debug, mem, ops::BitOr, prelude::*, result}; pub use weights::WeightInfo; pub use pallet::*; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -212,15 +210,13 @@ pub mod pallet { + MaybeSerializeDeserialize + Debug + MaxEncodedLen - + TypeInfo - + FixedPointOperand; + + TypeInfo; /// Handler for the unbalanced reduction when removing a dust account. type DustRemoval: OnUnbalanced>; /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The minimum amount required to keep an account open. 
#[pallet::constant] @@ -279,7 +275,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, - dest: AccountIdLookupOf, + dest: ::Source, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; @@ -307,7 +303,7 @@ pub mod pallet { )] pub fn set_balance( origin: OriginFor, - who: AccountIdLookupOf, + who: ::Source, #[pallet::compact] new_free: T::Balance, #[pallet::compact] new_reserved: T::Balance, ) -> DispatchResultWithPostInfo { @@ -357,8 +353,8 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, - source: AccountIdLookupOf, - dest: AccountIdLookupOf, + source: ::Source, + dest: ::Source, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -382,7 +378,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, - dest: AccountIdLookupOf, + dest: ::Source, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; @@ -411,7 +407,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer_all())] pub fn transfer_all( origin: OriginFor, - dest: AccountIdLookupOf, + dest: ::Source, keep_alive: bool, ) -> DispatchResult { use fungible::Inspect; @@ -429,7 +425,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_unreserve())] pub fn force_unreserve( origin: OriginFor, - who: AccountIdLookupOf, + who: ::Source, amount: T::Balance, ) -> DispatchResult { ensure_root(origin)?; @@ -477,7 +473,7 @@ pub mod pallet { VestingBalance, /// Account liquidity restrictions prevent withdrawal LiquidityRestrictions, - /// Balance too low to send value. 
+ /// Balance too low to send value InsufficientBalance, /// Value too low to create account due to existential deposit ExistentialDeposit, @@ -494,7 +490,6 @@ pub mod pallet { /// The total units issued in the system. #[pallet::storage] #[pallet::getter(fn total_issuance)] - #[pallet::whitelist_storage] pub type TotalIssuance, I: 'static = ()> = StorageValue<_, T::Balance, ValueQuery>; /// The Balances pallet example of storing the balance of an account. @@ -1003,8 +998,6 @@ impl, I: 'static> Pallet { /// Is a no-op if: /// - the value to be moved is zero; or /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. - /// - /// NOTE: returns actual amount of transferred value in `Ok` case. fn do_transfer_reserved( slashed: &T::AccountId, beneficiary: &T::AccountId, @@ -1018,7 +1011,7 @@ impl, I: 'static> Pallet { if slashed == beneficiary { return match status { - Status::Free => Ok(value.saturating_sub(Self::unreserve(slashed, value))), + Status::Free => Ok(Self::unreserve(slashed, value)), Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), } } @@ -1149,19 +1142,15 @@ impl, I: 'static> fungible::Transfer for Pallet impl, I: 'static> fungible::Unbalanced for Pallet { fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - Self::mutate_account(who, |account| -> DispatchResult { - // fungibles::Unbalanced::decrease_balance didn't check account.reserved - // free = new_balance - reserved - account.free = - amount.checked_sub(&account.reserved).ok_or(ArithmeticError::Underflow)?; + Self::mutate_account(who, |account| { + account.free = amount; Self::deposit_event(Event::BalanceSet { who: who.clone(), free: account.free, reserved: account.reserved, }); - - Ok(()) - })? + })?; + Ok(()) } fn set_total_issuance(amount: Self::Balance) { @@ -1794,8 +1783,6 @@ where /// Unreserve some funds, returning any amount that was unable to be unreserved. 
/// /// Is a no-op if the value to be unreserved is zero or the account does not exist. - /// - /// NOTE: returns amount value which wasn't successfully unreserved. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { if value.is_zero() { return Zero::zero() diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 83944caf9f7ff..8f5470ae3cac2 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -24,7 +24,7 @@ macro_rules! decl_tests { ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { use crate::*; - use sp_runtime::{ArithmeticError, TokenError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; + use sp_runtime::{ArithmeticError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; use frame_support::{ assert_noop, assert_storage_noop, assert_ok, assert_err, traits::{ @@ -38,15 +38,15 @@ macro_rules! decl_tests { const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; - pub const CALL: &<$test as frame_system::Config>::RuntimeCall = - &RuntimeCall::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); + pub const CALL: &<$test as frame_system::Config>::Call = + &Call::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { DispatchInfo { weight: w, ..Default::default() } } - fn events() -> Vec { + fn events() -> Vec { let evt = System::events().into_iter().map(|evt| evt.event).collect::>(); System::reset_events(); @@ -188,14 +188,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(Weight::from_ref_time(1)), + &info_from_weight(1), 1, ).is_err()); assert_ok!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(Weight::from_ref_time(1)), + &info_from_weight(1), 1, )); @@ -206,14 +206,14 @@ macro_rules! 
decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(Weight::from_ref_time(1)), + &info_from_weight(1), 1, ).is_err()); assert!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(Weight::from_ref_time(1)), + &info_from_weight(1), 1, ).is_err()); }); @@ -314,7 +314,7 @@ macro_rules! decl_tests { <$ext_builder>::default().monied(true).build().execute_with(|| { assert_eq!(Balances::total_balance(&1), 10); assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { who: 1, amount: 10 })); + System::assert_last_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 10 })); assert_eq!(Balances::total_balance(&1), 20); assert_eq!(>::get(), 120); }); @@ -342,7 +342,7 @@ macro_rules! decl_tests { fn balance_works() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { who: 1, amount: 42 })); + System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 42 })); assert_eq!(Balances::free_balance(1), 42); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::total_balance(&1), 42); @@ -444,7 +444,7 @@ macro_rules! decl_tests { let _ = Balances::withdraw( &2, 11, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive ); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Withdraw { who: 2, amount: 11 })); + System::assert_last_event(Event::Balances(crate::Event::Withdraw { who: 2, amount: 11 })); assert_eq!(Balances::free_balance(2), 100); assert_eq!(>::get(), 100); }); @@ -505,7 +505,7 @@ macro_rules! 
decl_tests { assert_ok!(Balances::reserve(&1, 110)); assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); System::assert_last_event( - RuntimeEvent::Balances(crate::Event::ReserveRepatriated { from: 1, to: 2, amount: 41, destination_status: Status::Free }) + Event::Balances(crate::Event::ReserveRepatriated { from: 1, to: 2, amount: 41, destination_status: Status::Free }) ); assert_eq!(Balances::reserved_balance(1), 69); assert_eq!(Balances::free_balance(1), 0); @@ -528,22 +528,6 @@ macro_rules! decl_tests { }); } - #[test] - fn transferring_reserved_balance_to_yourself_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - assert_ok!(Balances::reserve(&1, 50)); - assert_ok!(Balances::repatriate_reserved(&1, &1, 50, Status::Free), 0); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance(1), 0); - - assert_ok!(Balances::reserve(&1, 50)); - assert_ok!(Balances::repatriate_reserved(&1, &1, 60, Status::Free), 10); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance(1), 0); - }); - } - #[test] fn transferring_reserved_balance_to_nonexistent_should_fail() { <$ext_builder>::default().build().execute_with(|| { @@ -740,18 +724,18 @@ macro_rules! 
decl_tests { System::set_block_number(2); assert_ok!(Balances::reserve(&1, 10)); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Reserved { who: 1, amount: 10 })); + System::assert_last_event(Event::Balances(crate::Event::Reserved { who: 1, amount: 10 })); System::set_block_number(3); assert!(Balances::unreserve(&1, 5).is_zero()); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); + System::assert_last_event(Event::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); System::set_block_number(4); assert_eq!(Balances::unreserve(&1, 6), 1); // should only unreserve 5 - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); + System::assert_last_event(Event::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); }); } @@ -766,9 +750,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - RuntimeEvent::System(system::Event::NewAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), + Event::System(system::Event::NewAccount { account: 1 }), + Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -778,9 +762,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 99 }), - RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }), + Event::System(system::Event::KilledAccount { account: 1 }), + Event::Balances(crate::Event::DustLost { account: 1, amount: 99 }), + Event::Balances(crate::Event::Slashed { who: 1, amount: 1 }), ] ); }); @@ -797,9 +781,9 @@ macro_rules! 
decl_tests { assert_eq!( events(), [ - RuntimeEvent::System(system::Event::NewAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), + Event::System(system::Event::NewAccount { account: 1 }), + Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -809,8 +793,8 @@ macro_rules! decl_tests { assert_eq!( events(), [ - RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 100 }), + Event::System(system::Event::KilledAccount { account: 1 }), + Event::Balances(crate::Event::Slashed { who: 1, amount: 100 }), ] ); }); @@ -825,43 +809,43 @@ macro_rules! decl_tests { /* User has no reference counter, so they can die in these scenarios */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); // Slashed completed in full assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 900 })); + System::assert_last_event(Event::Balances(crate::Event::Slashed { who: 1, amount: 900 })); // SCENARIO: Slash will kill account because not enough balance left. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); // Slashed completed in full assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(950), 0)); // Account is killed assert!(!System::account_exists(&1)); // SCENARIO: Over-slash will kill account, and report missing slash amount. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); // Slashed full free_balance, and reports 300 not slashed assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1000), 300)); // Account is dead assert!(!System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 400)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, and kill. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 350)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); // Account is dead because 50 reserved balance is not enough to keep alive assert!(!System::account_exists(&1)); // SCENARIO: Over-slash can take as much as possible from reserved, kill, and report missing amount. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 250)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); // Account is super dead @@ -870,7 +854,7 @@ macro_rules! decl_tests { /* User will now have a reference counter on them, keeping them alive in these scenarios */ // SCENARIO: Slash would not kill account. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests // Slashed completed in full assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); @@ -878,35 +862,35 @@ macro_rules! decl_tests { assert!(System::account_exists(&1)); // SCENARIO: Slash will take as much as possible without killing account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); // Slashed completed in full assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(900), 50)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash will not kill account, and report missing slash amount. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); // Slashed full free_balance minus ED, and reports 400 not slashed assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(900), 400)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 400)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, but keep alive. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 350)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); // Slashed full free_balance and 250 of reserved balance to leave ED assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take as much as possible from reserved and report missing amount. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 250)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1150), 150)); // Account is still alive @@ -926,28 +910,28 @@ macro_rules! decl_tests { /* User has no reference counter, so they can die in these scenarios */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Slash would kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(1_000), 0)); // Account is dead assert!(!System::account_exists(&1)); // SCENARIO: Over-slash would kill account, and reports left over slash. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); // Account is dead assert!(!System::account_exists(&1)); // SCENARIO: Over-slash does not take from free balance. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 300, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 300, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); // Account is alive because of free balance @@ -956,7 +940,7 @@ macro_rules! decl_tests { /* User has a reference counter, so they cannot die */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); @@ -964,21 +948,21 @@ macro_rules! decl_tests { assert!(System::account_exists(&1)); // SCENARIO: Slash as much as possible without killing. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); // Slashed as much as possible assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(950), 50)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash reports correctly, where reserved is needed to keep alive. 
- assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); // Slashed as much as possible assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(950), 350)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash reports correctly, where full reserved is removed. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 1_000)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 1_000)); // Slashed as much as possible assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); // Account is still alive @@ -1018,7 +1002,7 @@ macro_rules! decl_tests { .existential_deposit(100) .build() .execute_with(|| { - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 100, 100)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 100, 100)); assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); assert_eq!(Balances::total_balance(&1), 100); assert_eq!(Balances::total_balance(&2), 100); @@ -1032,32 +1016,32 @@ macro_rules! 
decl_tests { .build() .execute_with(|| { // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 0)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); // transfer all and allow death assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); assert_eq!(Balances::total_balance(&1), 0); assert_eq!(Balances::total_balance(&2), 200); // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 0)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); // transfer all and keep alive assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); assert_eq!(Balances::total_balance(&1), 100); assert_eq!(Balances::total_balance(&2), 100); // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 10)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); // transfer all and allow death w/ reserved assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); assert_eq!(Balances::total_balance(&1), 0); assert_eq!(Balances::total_balance(&2), 200); // setup - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 10)); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); // transfer all and keep alive w/ reserved assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); assert_eq!(Balances::total_balance(&1), 100); @@ -1183,25 +1167,6 @@ macro_rules! 
decl_tests { }); } - #[test] - fn reserved_named_to_yourself_should_work() { - <$ext_builder>::default().build().execute_with(|| { - let _ = Balances::deposit_creating(&1, 110); - - let id = [1u8; 8]; - - assert_ok!(Balances::reserve_named(&id, &1, 50)); - assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 50, Status::Free), 0); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - - assert_ok!(Balances::reserve_named(&id, &1, 50)); - assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 60, Status::Free), 10); - assert_eq!(Balances::free_balance(1), 110); - assert_eq!(Balances::reserved_balance_named(&id, &1), 0); - }); - } - #[test] fn ensure_reserved_named_should_work() { <$ext_builder>::default().build().execute_with(|| { @@ -1281,7 +1246,7 @@ macro_rules! decl_tests { let _ = Balances::deposit_creating(&1, 111); assert_ok!(frame_system::Pallet::::inc_consumers(&1)); assert_noop!( - Balances::set_balance(RuntimeOrigin::root(), 1, 0, 0), + Balances::set_balance(Origin::root(), 1, 0, 0), DispatchError::ConsumerRemaining, ); }); @@ -1291,170 +1256,12 @@ macro_rules! 
decl_tests { fn set_balance_handles_total_issuance() { <$ext_builder>::default().build().execute_with(|| { let old_total_issuance = Balances::total_issuance(); - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1337, 69, 42)); + assert_ok!(Balances::set_balance(Origin::root(), 1337, 69, 42)); assert_eq!(Balances::total_issuance(), old_total_issuance + 69 + 42); assert_eq!(Balances::total_balance(&1337), 69 + 42); assert_eq!(Balances::free_balance(&1337), 69); assert_eq!(Balances::reserved_balance(&1337), 42); }); } - - #[test] - fn fungible_unbalanced_trait_set_balance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_eq!(>::balance(&1337), 0); - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!(>::balance(&1337), 100); - - assert_ok!(Balances::reserve(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - - assert_noop!(>::set_balance(&1337, 0), ArithmeticError::Underflow); - - assert_ok!(>::set_balance(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 0); - assert_eq!(Balances::reserved_balance(1337), 60); - }); - } - - #[test] - fn fungible_unbalanced_trait_set_total_issuance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_eq!(>::total_issuance(), 0); - >::set_total_issuance(100); - assert_eq!(>::total_issuance(), 100); - }); - } - - #[test] - fn fungible_unbalanced_trait_decrease_balance_simple_works() { - <$ext_builder>::default().build().execute_with(|| { - // An Account that starts at 100 - assert_ok!(>::set_balance(&1337, 100)); - // and reserves 50 - assert_ok!(Balances::reserve(&1337, 50)); - // and is decreased by 20 - assert_ok!(>::decrease_balance(&1337, 20)); - // should end up at 80. 
- assert_eq!(>::balance(&1337), 80); - }); - } - - #[test] - fn fungible_unbalanced_trait_decrease_balance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!(>::balance(&1337), 100); - - assert_noop!( - >::decrease_balance(&1337, 101), - TokenError::NoFunds - ); - assert_eq!( - >::decrease_balance(&1337, 100), - Ok(100) - ); - assert_eq!(>::balance(&1337), 0); - - // free: 40, reserved: 60 - assert_ok!(>::set_balance(&1337, 100)); - assert_ok!(Balances::reserve(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - assert_noop!( - >::decrease_balance(&1337, 41), - TokenError::NoFunds - ); - assert_eq!( - >::decrease_balance(&1337, 40), - Ok(40) - ); - assert_eq!(>::balance(&1337), 60); - assert_eq!(Balances::free_balance(1337), 0); - assert_eq!(Balances::reserved_balance(1337), 60); - }); - } - - #[test] - fn fungible_unbalanced_trait_decrease_balance_at_most_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!(>::balance(&1337), 100); - - assert_eq!( - >::decrease_balance_at_most(&1337, 101), - 100 - ); - assert_eq!(>::balance(&1337), 0); - - assert_ok!(>::set_balance(&1337, 100)); - assert_eq!( - >::decrease_balance_at_most(&1337, 100), - 100 - ); - assert_eq!(>::balance(&1337), 0); - - // free: 40, reserved: 60 - assert_ok!(>::set_balance(&1337, 100)); - assert_ok!(Balances::reserve(&1337, 60)); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - assert_eq!( - >::decrease_balance_at_most(&1337, 0), - 0 - ); - assert_eq!(Balances::free_balance(1337) , 40); - assert_eq!(Balances::reserved_balance(1337), 60); - assert_eq!( - >::decrease_balance_at_most(&1337, 10), - 10 - ); - assert_eq!(Balances::free_balance(1337), 30); - assert_eq!( - >::decrease_balance_at_most(&1337, 200), - 30 - ); - assert_eq!(>::balance(&1337), 60); 
- assert_eq!(Balances::free_balance(1337), 0); - assert_eq!(Balances::reserved_balance(1337), 60); - }); - } - - #[test] - fn fungible_unbalanced_trait_increase_balance_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_noop!( - >::increase_balance(&1337, 0), - TokenError::BelowMinimum - ); - assert_eq!( - >::increase_balance(&1337, 1), - Ok(1) - ); - assert_noop!( - >::increase_balance(&1337, u64::MAX), - ArithmeticError::Overflow - ); - }); - } - - #[test] - fn fungible_unbalanced_trait_increase_balance_at_most_works() { - <$ext_builder>::default().build().execute_with(|| { - assert_eq!( - >::increase_balance_at_most(&1337, 0), - 0 - ); - assert_eq!( - >::increase_balance_at_most(&1337, 1), - 1 - ); - assert_eq!( - >::increase_balance_at_most(&1337, u64::MAX), - u64::MAX - 1 - ); - }); - } } } diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index f8a8fdd1851d4..4ab913cf1411a 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -21,10 +21,9 @@ use crate::{self as pallet_balances, decl_tests, Config, Pallet}; use frame_support::{ - dispatch::DispatchInfo, parameter_types, traits::{ConstU32, ConstU64, ConstU8}, - weights::{IdentityFee, Weight}, + weights::{DispatchInfo, IdentityFee, Weight}, }; use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; @@ -47,9 +46,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(1024); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { @@ -57,16 +54,16 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -80,7 +77,7 @@ impl frame_system::Config for Test { } impl pallet_transaction_payment::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter, ()>; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -91,7 +88,7 @@ impl pallet_transaction_payment::Config for Test { impl Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type MaxLocks = (); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 152a5da37410f..48c6574c3b39f 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -21,10 +21,9 @@ use crate::{self as pallet_balances, decl_tests, Config, Pallet}; use frame_support::{ - dispatch::DispatchInfo, parameter_types, traits::{ConstU32, ConstU64, ConstU8, StorageMapShim}, - weights::{IdentityFee, Weight}, + weights::{DispatchInfo, IdentityFee, Weight}, }; use 
pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; @@ -48,9 +47,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(1024); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { @@ -58,16 +55,16 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -81,7 +78,7 @@ impl frame_system::Config for Test { } impl pallet_transaction_payment::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter, ()>; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -92,7 +89,7 @@ impl pallet_transaction_payment::Config for Test { impl Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = StorageMapShim, system::Provider, u64, super::AccountData>; @@ -161,9 +158,9 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - RuntimeEvent::System(system::Event::NewAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), + 
Event::System(system::Event::NewAccount { account: 1 }), + Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -171,10 +168,7 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!(res, (NegativeImbalance::new(98), 0)); // no events - assert_eq!( - events(), - [RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 98 })] - ); + assert_eq!(events(), [Event::Balances(crate::Event::Slashed { who: 1, amount: 98 })]); let res = Balances::slash(&1, 1); assert_eq!(res, (NegativeImbalance::new(1), 0)); @@ -182,9 +176,9 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), - RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 1 }), - RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }) + Event::System(system::Event::KilledAccount { account: 1 }), + Event::Balances(crate::Event::DustLost { account: 1, amount: 1 }), + Event::Balances(crate::Event::Slashed { who: 1, amount: 1 }) ] ); }); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 90363140000e8..4c028840d553c 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -51,9 +51,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(1024); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { @@ -61,16 +59,16 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -93,7 +91,7 @@ impl OnUnbalanced> for OnDustRemoval { impl Config for Test { type Balance = u64; type DustRemoval = OnDustRemoval; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = StorageMapShim, system::Provider, u64, super::AccountData>; @@ -159,19 +157,16 @@ fn transfer_dust_removal_tst1_should_work() { // Verify the events assert_eq!(System::events().len(), 12); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { + System::assert_has_event(Event::Balances(crate::Event::Transfer { from: 2, to: 3, amount: 450, })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { + System::assert_has_event(Event::Balances(crate::Event::DustLost { account: 2, amount: 50, })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { - who: 1, - amount: 50, - })); + System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); }); } @@ -197,19 +192,16 @@ fn transfer_dust_removal_tst2_should_work() { // Verify the events 
assert_eq!(System::events().len(), 10); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { + System::assert_has_event(Event::Balances(crate::Event::Transfer { from: 2, to: 1, amount: 450, })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { + System::assert_has_event(Event::Balances(crate::Event::DustLost { account: 2, amount: 50, })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { - who: 1, - amount: 50, - })); + System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); }); } @@ -244,21 +236,18 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Verify the events assert_eq!(System::events().len(), 11); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::ReserveRepatriated { + System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated { from: 2, to: 1, amount: 450, destination_status: Status::Free, })); - System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { + System::assert_has_event(Event::Balances(crate::Event::DustLost { account: 2, amount: 50, })); - System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { - who: 1, - amount: 50, - })); + System::assert_last_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); }); } diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 6324745fd4310..f612d31997996 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_balances //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/balances/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/balances/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,52 +58,45 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 48_134 nanoseconds. - Weight::from_ref_time(48_811_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (41_860_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - // Minimum execution time: 36_586 nanoseconds. - Weight::from_ref_time(36_966_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (32_760_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - // Minimum execution time: 28_486 nanoseconds. 
- Weight::from_ref_time(28_940_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (22_279_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - // Minimum execution time: 31_225 nanoseconds. - Weight::from_ref_time(31_946_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (25_488_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - // Minimum execution time: 47_347 nanoseconds. - Weight::from_ref_time(48_005_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (42_190_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - // Minimum execution time: 41_668 nanoseconds. - Weight::from_ref_time(42_232_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (37_789_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - // Minimum execution time: 23_741 nanoseconds. 
- Weight::from_ref_time(24_073_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (20_056_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } @@ -114,51 +104,44 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 48_134 nanoseconds. - Weight::from_ref_time(48_811_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (41_860_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - // Minimum execution time: 36_586 nanoseconds. - Weight::from_ref_time(36_966_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (32_760_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - // Minimum execution time: 28_486 nanoseconds. - Weight::from_ref_time(28_940_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (22_279_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - // Minimum execution time: 31_225 nanoseconds. 
- Weight::from_ref_time(31_946_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (25_488_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - // Minimum execution time: 47_347 nanoseconds. - Weight::from_ref_time(48_005_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (42_190_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - // Minimum execution time: 41_668 nanoseconds. - Weight::from_ref_time(42_232_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (37_789_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - // Minimum execution time: 23_741 nanoseconds. 
- Weight::from_ref_time(24_073_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (20_056_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml index 62fabd387a167..8da182f1c29fc 100644 --- a/frame/beefy-mmr/Cargo.toml +++ b/frame/beefy-mmr/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/paritytech/substrate" homepage = "https://substrate.io" [dependencies] -array-bytes = { version = "4.1", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +hex = { version = "0.4", optional = true } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } @@ -27,18 +27,18 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -array-bytes = "4.1" +hex-literal = "0.3" sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } [features] default = ["std"] std = [ - "array-bytes", "beefy-merkle-tree/std", "beefy-primitives/std", "codec/std", "frame-support/std", "frame-system/std", + "hex", "log/std", "pallet-beefy/std", "pallet-mmr/std", @@ -50,4 +50,3 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml index a097da0fc30fd..f30a418def042 100644 --- a/frame/beefy-mmr/primitives/Cargo.toml +++ b/frame/beefy-mmr/primitives/Cargo.toml @@ -9,22 +9,23 @@ description = "A no-std/Substrate compatible library to construct binary merkle homepage = "https://substrate.io" 
[dependencies] -array-bytes = { version = "4.1", optional = true } +hex = { version = "0.4", default-features = false, optional = true } log = { version = "0.4", default-features = false, optional = true } +tiny-keccak = { version = "2.0.2", features = ["keccak"], optional = true } beefy-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/beefy" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -array-bytes = "4.1" env_logger = "0.9" +hex = "0.4" +hex-literal = "0.3" [features] -debug = ["array-bytes", "log"] -default = ["debug", "std"] +debug = ["hex", "hex/std", "log"] +default = ["debug", "keccak", "std"] +keccak = ["tiny-keccak"] std = [ "beefy-primitives/std", - "sp-api/std", - "sp-runtime/std" + "sp-api/std" ] diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index f88fb89acaaab..664fd18199dd0 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -25,49 +25,88 @@ //! compilation targets. //! //! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the -//! same hasher as the inner nodes. +//! same [Hasher] as the inner nodes. //! Inner nodes are created by concatenating child hashes and hashing again. The implementation //! does not perform any sorting of the input data (leaves) nor when inner nodes are created. //! -//! If the number of leaves is not even, last leaf (hash of) is promoted to the upper layer. +//! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. 
-pub use sp_runtime::traits::Keccak256; -use sp_runtime::{app_crypto::sp_core, sp_std, traits::Hash as HashT}; -use sp_std::{vec, vec::Vec}; +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use beefy_primitives::mmr::{BeefyAuthoritySet, BeefyNextAuthoritySet}; +/// Supported hashing output size. +/// +/// The size is restricted to 32 bytes to allow for a more optimised implementation. +pub type Hash = [u8; 32]; + +/// Generic hasher trait. +/// +/// Implement the function to support custom way of hashing data. +/// The implementation must return a [Hash](type@Hash) type, so only 32-byte output hashes are +/// supported. +pub trait Hasher { + /// Hash given arbitrary-length piece of data. + fn hash(data: &[u8]) -> Hash; +} + +#[cfg(feature = "keccak")] +mod keccak256 { + use tiny_keccak::{Hasher as _, Keccak}; + + /// Keccak256 hasher implementation. + pub struct Keccak256; + impl Keccak256 { + /// Hash given data. + pub fn hash(data: &[u8]) -> super::Hash { + ::hash(data) + } + } + impl super::Hasher for Keccak256 { + fn hash(data: &[u8]) -> super::Hash { + let mut keccak = Keccak::v256(); + keccak.update(data); + let mut output = [0_u8; 32]; + keccak.finalize(&mut output); + output + } + } +} +#[cfg(feature = "keccak")] +pub use keccak256::Keccak256; + /// Construct a root hash of a Binary Merkle Tree created from given leaves. /// /// See crate-level docs for details about Merkle Tree construction. /// /// In case an empty list of leaves is passed the function returns a 0-filled hash. 
-pub fn merkle_root(leaves: I) -> H::Output +pub fn merkle_root(leaves: I) -> Hash where - H: HashT, - H::Output: Default + AsRef<[u8]>, - I: IntoIterator, - I::Item: AsRef<[u8]>, + H: Hasher, + I: IntoIterator, + T: AsRef<[u8]>, { - let iter = leaves.into_iter().map(|l| ::hash(l.as_ref())); - merkelize::(iter, &mut ()).into() + let iter = leaves.into_iter().map(|l| H::hash(l.as_ref())); + merkelize::(iter, &mut ()) } -fn merkelize(leaves: I, visitor: &mut V) -> H::Output +fn merkelize(leaves: I, visitor: &mut V) -> Hash where - H: HashT, - H::Output: Default + AsRef<[u8]>, - V: Visitor, - I: Iterator, + H: Hasher, + V: Visitor, + I: Iterator, { - let upper = Vec::with_capacity((leaves.size_hint().1.unwrap_or(0).saturating_add(1)) / 2); + let upper = Vec::with_capacity(leaves.size_hint().0); let mut next = match merkelize_row::(leaves, upper, visitor) { Ok(root) => return root, - Err(next) if next.is_empty() => return H::Output::default(), + Err(next) if next.is_empty() => return Hash::default(), Err(next) => next, }; - let mut upper = Vec::with_capacity((next.len().saturating_add(1)) / 2); + let mut upper = Vec::with_capacity((next.len() + 1) / 2); loop { visitor.move_up(); @@ -86,14 +125,14 @@ where /// /// The structure contains all necessary data to later on verify the proof and the leaf itself. #[derive(Debug, PartialEq, Eq)] -pub struct MerkleProof { +pub struct MerkleProof { /// Root hash of generated merkle tree. - pub root: H, + pub root: Hash, /// Proof items (does not contain the leaf hash, nor the root obviously). /// /// This vec contains all inner node hashes necessary to reconstruct the root hash given the /// leaf hash. - pub proof: Vec, + pub proof: Vec, /// Number of leaves in the original tree. /// /// This is needed to detect a case where we have an odd number of leaves that "get promoted" @@ -102,14 +141,14 @@ pub struct MerkleProof { /// Index of the leaf the proof is for (0-based). pub leaf_index: usize, /// Leaf content. 
- pub leaf: L, + pub leaf: T, } /// A trait of object inspecting merkle root creation. /// /// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified /// about tree traversal. -trait Visitor { +trait Visitor { /// We are moving one level up in the tree. fn move_up(&mut self); @@ -119,13 +158,13 @@ trait Visitor { /// The method will also visit the `root` hash (level 0). /// /// The `index` is an index of `left` item. - fn visit(&mut self, index: usize, left: &Option, right: &Option); + fn visit(&mut self, index: usize, left: &Option, right: &Option); } /// No-op implementation of the visitor. -impl Visitor for () { +impl Visitor for () { fn move_up(&mut self) {} - fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} + fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} } /// Construct a Merkle Proof for leaves given by indices. @@ -138,17 +177,16 @@ impl Visitor for () { /// # Panic /// /// The function will panic if given `leaf_index` is greater than the number of leaves. -pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof +pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof where - H: HashT, - H::Output: Default + Copy + AsRef<[u8]>, + H: Hasher, I: IntoIterator, I::IntoIter: ExactSizeIterator, T: AsRef<[u8]>, { let mut leaf = None; let iter = leaves.into_iter().enumerate().map(|(idx, l)| { - let hash = ::hash(l.as_ref()); + let hash = H::hash(l.as_ref()); if idx == leaf_index { leaf = Some(l); } @@ -156,23 +194,23 @@ where }); /// The struct collects a proof for single leaf. 
- struct ProofCollection { - proof: Vec, + struct ProofCollection { + proof: Vec, position: usize, } - impl ProofCollection { + impl ProofCollection { fn new(position: usize) -> Self { ProofCollection { proof: Default::default(), position } } } - impl Visitor for ProofCollection { + impl Visitor for ProofCollection { fn move_up(&mut self) { self.position /= 2; } - fn visit(&mut self, index: usize, left: &Option, right: &Option) { + fn visit(&mut self, index: usize, left: &Option, right: &Option) { // we are at left branch - right goes to the proof. if self.position == index { if let Some(right) = right { @@ -197,11 +235,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkle_proof] Proof: {:?}", - collect_proof - .proof - .iter() - .map(|s| array_bytes::bytes2hex("", s.as_ref())) - .collect::>() + collect_proof.proof.iter().map(hex::encode).collect::>() ); MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } @@ -212,19 +246,25 @@ where /// Can be either a value that needs to be hashed first, /// or the hash itself. #[derive(Debug, PartialEq, Eq)] -pub enum Leaf<'a, H> { +pub enum Leaf<'a> { /// Leaf content. Value(&'a [u8]), /// Hash of the leaf content. - Hash(H), + Hash(Hash), } -impl<'a, H, T: AsRef<[u8]>> From<&'a T> for Leaf<'a, H> { +impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { fn from(v: &'a T) -> Self { Leaf::Value(v.as_ref()) } } +impl<'a> From for Leaf<'a> { + fn from(v: Hash) -> Self { + Leaf::Hash(v) + } +} + /// Verify Merkle Proof correctness versus given root hash. /// /// The proof is NOT expected to contain leaf hash as the first @@ -233,47 +273,45 @@ impl<'a, H, T: AsRef<[u8]>> From<&'a T> for Leaf<'a, H> { /// /// The proof must not contain the root hash. 
pub fn verify_proof<'a, H, P, L>( - root: &'a H::Output, + root: &'a Hash, proof: P, number_of_leaves: usize, leaf_index: usize, leaf: L, ) -> bool where - H: HashT, - H::Output: PartialEq + AsRef<[u8]>, - P: IntoIterator, - L: Into>, + H: Hasher, + P: IntoIterator, + L: Into>, { if leaf_index >= number_of_leaves { return false } let leaf_hash = match leaf.into() { - Leaf::Value(content) => ::hash(content), + Leaf::Value(content) => H::hash(content), Leaf::Hash(hash) => hash, }; - let hash_len = ::LENGTH; - let mut combined = vec![0_u8; hash_len * 2]; + let mut combined = [0_u8; 64]; let mut position = leaf_index; let mut width = number_of_leaves; let computed = proof.into_iter().fold(leaf_hash, |a, b| { if position % 2 == 1 || position + 1 == width { - combined[..hash_len].copy_from_slice(&b.as_ref()); - combined[hash_len..].copy_from_slice(&a.as_ref()); + combined[0..32].copy_from_slice(&b); + combined[32..64].copy_from_slice(&a); } else { - combined[..hash_len].copy_from_slice(&a.as_ref()); - combined[hash_len..].copy_from_slice(&b.as_ref()); + combined[0..32].copy_from_slice(&a); + combined[32..64].copy_from_slice(&b); } - let hash = ::hash(&combined); + let hash = H::hash(&combined); #[cfg(feature = "debug")] log::debug!( "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", - array_bytes::bytes2hex("", &a.as_ref()), - array_bytes::bytes2hex("", &b.as_ref()), - array_bytes::bytes2hex("", &hash.as_ref()), - array_bytes::bytes2hex("", &combined.as_ref()) + hex::encode(a), + hex::encode(b), + hex::encode(hash), + hex::encode(combined) ); position /= 2; width = ((width - 1) / 2) + 1; @@ -290,41 +328,35 @@ where /// empty iterator) an `Err` with the inner nodes of upper layer is returned. 
fn merkelize_row( mut iter: I, - mut next: Vec, + mut next: Vec, visitor: &mut V, -) -> Result> +) -> Result> where - H: HashT, - H::Output: AsRef<[u8]>, - V: Visitor, - I: Iterator, + H: Hasher, + V: Visitor, + I: Iterator, { #[cfg(feature = "debug")] log::debug!("[merkelize_row]"); next.clear(); - let hash_len = ::LENGTH; let mut index = 0; - let mut combined = vec![0_u8; hash_len * 2]; + let mut combined = [0_u8; 64]; loop { let a = iter.next(); let b = iter.next(); visitor.visit(index, &a, &b); #[cfg(feature = "debug")] - log::debug!( - " {:?}\n {:?}", - a.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())), - b.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())) - ); + log::debug!(" {:?}\n {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode)); index += 2; match (a, b) { (Some(a), Some(b)) => { - combined[..hash_len].copy_from_slice(a.as_ref()); - combined[hash_len..].copy_from_slice(b.as_ref()); + combined[0..32].copy_from_slice(&a); + combined[32..64].copy_from_slice(&b); - next.push(::hash(&combined)); + next.push(H::hash(&combined)); }, // Odd number of items. Promote the item to the upper layer. (Some(a), None) if !next.is_empty() => { @@ -337,7 +369,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkelize_row] Next: {:?}", - next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() + next.iter().map(hex::encode).collect::>() ); return Err(next) }, @@ -349,6 +381,7 @@ sp_api::decl_runtime_apis! { /// API useful for BEEFY light clients. pub trait BeefyMmrApi where + H: From + Into, BeefyAuthoritySet: sp_api::Decode, { /// Return the currently active BEEFY authority set proof. @@ -362,7 +395,7 @@ sp_api::decl_runtime_apis! 
{ #[cfg(test)] mod tests { use super::*; - use crate::sp_core::H256; + use hex_literal::hex; #[test] fn should_generate_empty_root() { @@ -371,11 +404,11 @@ mod tests { let data: Vec<[u8; 1]> = Default::default(); // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", out.as_ref()), + hex::encode(&out), "0000000000000000000000000000000000000000000000000000000000000000" ); } @@ -384,16 +417,14 @@ mod tests { fn should_generate_single_root() { // given let _ = env_logger::try_init(); - let data = vec![array_bytes::hex2array_unchecked::<20>( - "E04CC55ebEE1cBCE552f250e85c57B70B2E2625b", - )]; + let data = vec![hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b")]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", out.as_ref()), + hex::encode(&out), "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" ); } @@ -403,16 +434,16 @@ mod tests { // given let _ = env_logger::try_init(); let data = vec![ - array_bytes::hex2array_unchecked::<20>("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), - array_bytes::hex2array_unchecked::<20>("25451A4de12dcCc2D166922fA938E900fCc4ED24"), + hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), + hex!("25451A4de12dcCc2D166922fA938E900fCc4ED24"), ]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", out.as_ref()), + hex::encode(&out), "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" ); } @@ -421,10 +452,7 @@ mod tests { fn should_generate_root_complex() { let _ = env_logger::try_init(); let test = |root, data| { - assert_eq!( - array_bytes::bytes2hex("", &merkle_root::(data).as_ref()), - root - ); + assert_eq!(hex::encode(&merkle_root::(data)), root); }; test( @@ -483,20 +511,11 @@ mod tests { )); // then - assert_eq!( - array_bytes::bytes2hex("", &proof0.root.as_ref()), - array_bytes::bytes2hex("", 
&proof1.root.as_ref()) - ); - assert_eq!( - array_bytes::bytes2hex("", &proof2.root.as_ref()), - array_bytes::bytes2hex("", &proof1.root.as_ref()) - ); + assert_eq!(hex::encode(proof0.root), hex::encode(proof1.root)); + assert_eq!(hex::encode(proof2.root), hex::encode(proof1.root)); assert!(!verify_proof::( - &array_bytes::hex2array_unchecked( - "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239" - ) - .into(), + &hex!("fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239"), proof0.proof, data.len(), proof0.leaf_index, @@ -504,7 +523,7 @@ mod tests { )); assert!(!verify_proof::( - &proof0.root.into(), + &proof0.root, vec![], data.len(), proof0.leaf_index, @@ -760,23 +779,17 @@ mod tests { "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", ]; - let root: H256 = array_bytes::hex2array_unchecked( - "72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53", - ) - .into(); + let root = hex!("72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53"); let data = addresses .into_iter() - .map(|address| array_bytes::hex2bytes_unchecked(&address)) + .map(|address| hex::decode(&address[2..]).unwrap()) .collect::>(); for l in 0..data.len() { // when let proof = merkle_proof::(data.clone(), l); - assert_eq!( - array_bytes::bytes2hex("", &proof.root.as_ref()), - array_bytes::bytes2hex("", &root.as_ref()) - ); + assert_eq!(hex::encode(&proof.root), hex::encode(&root)); assert_eq!(proof.leaf_index, l); assert_eq!(&proof.leaf, &data[l]); @@ -797,29 +810,14 @@ mod tests { MerkleProof { root, proof: vec![ - array_bytes::hex2array_unchecked( - "340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f" - ) - .into(), - array_bytes::hex2array_unchecked( - "ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f" - ) - .into(), - array_bytes::hex2array_unchecked( - "d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79" - ) - .into(), - array_bytes::hex2array_unchecked( - 
"ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e" - ) - .into(), + hex!("340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f"), + hex!("ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f"), + hex!("d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79"), + hex!("ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e"), ], number_of_leaves: data.len(), leaf_index: data.len() - 1, - leaf: array_bytes::hex2array_unchecked::<20>( - "c26B34D375533fFc4c5276282Fa5D660F3d8cbcB" - ) - .to_vec(), + leaf: hex!("c26B34D375533fFc4c5276282Fa5D660F3d8cbcB").to_vec(), } ); } diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 0b7fc22cd279b..456d6e77aa8eb 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -33,7 +33,7 @@ //! //! and thanks to versioning can be easily updated in the future. -use sp_runtime::traits::{Convert, Member}; +use sp_runtime::traits::{Convert, Hash, Member}; use sp_std::prelude::*; use beefy_primitives::{ @@ -73,8 +73,12 @@ where /// Convert BEEFY secp256k1 public keys into Ethereum addresses pub struct BeefyEcdsaToEthereum; impl Convert> for BeefyEcdsaToEthereum { - fn convert(beefy_id: beefy_primitives::crypto::AuthorityId) -> Vec { - sp_core::ecdsa::Public::from(beefy_id) + fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { + sp_core::ecdsa::Public::try_from(a.as_ref()) + .map_err(|_| { + log::error!(target: "runtime::beefy", "Invalid BEEFY PublicKey format!"); + }) + .unwrap_or(sp_core::ecdsa::Public::from_raw([0u8; 33])) .to_eth_address() .map(|v| v.to_vec()) .map_err(|_| { @@ -138,7 +142,10 @@ pub mod pallet { StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; } -impl LeafDataProvider for Pallet { +impl LeafDataProvider for Pallet +where + MerkleRootOf: From + Into, +{ type LeafData = MmrLeaf< ::BlockNumber, ::Hash, @@ -156,9 +163,19 @@ impl LeafDataProvider for Pallet { } } +impl beefy_merkle_tree::Hasher for Pallet +where + 
MerkleRootOf: Into, +{ + fn hash(data: &[u8]) -> beefy_merkle_tree::Hash { + ::Hashing::hash(data).into() + } +} + impl beefy_primitives::OnNewValidatorSet<::BeefyId> for Pallet where T: pallet::Config, + MerkleRootOf: From + Into, { /// Compute and cache BEEFY authority sets based on updated BEEFY validator sets. fn on_new_validator_set( @@ -173,7 +190,10 @@ where } } -impl Pallet { +impl Pallet +where + MerkleRootOf: From + Into, +{ /// Return the currently active BEEFY authority set proof. pub fn authority_set_proof() -> BeefyAuthoritySet> { Pallet::::beefy_authorities() @@ -200,10 +220,7 @@ impl Pallet { .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); let len = beefy_addresses.len() as u32; - let root = beefy_merkle_tree::merkle_root::<::Hashing, _>( - beefy_addresses, - ) - .into(); + let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); BeefyAuthoritySet { id, len, root } } } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 0a64ad3fc9976..8a673c9d4e914 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -67,16 +67,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -90,7 +90,7 @@ impl frame_system::Config for Test { } impl pallet_session::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; @@ -147,10 +147,9 @@ impl BeefyDataProvider> for DummyDataProvider 
{ fn extra_data() -> Vec { let mut col = vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])]; col.sort(); - beefy_merkle_tree::merkle_root::<::Hashing, _>( + beefy_merkle_tree::merkle_root::, _, _>( col.into_iter().map(|pair| pair.encode()), ) - .as_ref() .to_vec() } } diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index 1826331f59e53..eaa50004ae848 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -22,6 +22,7 @@ use beefy_primitives::{ ValidatorSet, }; use codec::{Decode, Encode}; +use hex_literal::hex; use sp_core::H256; use sp_io::TestExternalities; @@ -69,9 +70,9 @@ fn should_contain_mmr_digest() { beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" - ))) + beefy_log(ConsensusLog::MmrRoot( + hex!("95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc").into() + )) ] ); @@ -84,15 +85,15 @@ fn should_contain_mmr_digest() { beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" - ))), + beefy_log(ConsensusLog::MmrRoot( + hex!("95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc").into() + )), beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(3), mock_beefy_id(4)], 2).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2" - ))), + beefy_log(ConsensusLog::MmrRoot( + hex!("a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2").into() + )), ] ); }); @@ -100,8 +101,8 @@ fn should_contain_mmr_digest() { #[test] fn should_contain_valid_leaf_data() { - fn 
node_offchain_key(pos: usize, parent_hash: H256) -> Vec { - (::INDEXING_PREFIX, pos as u64, parent_hash).encode() + fn node_offchain_key(parent_hash: H256, pos: usize) -> Vec { + (::INDEXING_PREFIX, parent_hash, pos as u64).encode() } let mut ext = new_test_ext(vec![1, 2, 3, 4]); @@ -110,7 +111,7 @@ fn should_contain_valid_leaf_data() { >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(0, parent_hash)); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 0)); assert_eq!( mmr_leaf, MmrLeaf { @@ -119,13 +120,11 @@ fn should_contain_valid_leaf_data() { beefy_next_authority_set: BeefyNextAuthoritySet { id: 2, len: 2, - root: array_bytes::hex_n_into_unchecked( - "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5" - ) + root: hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5") + .into(), }, - leaf_extra: array_bytes::hex2bytes_unchecked( - "55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648" - ) + leaf_extra: hex!("55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648") + .to_vec(), } ); @@ -135,7 +134,7 @@ fn should_contain_valid_leaf_data() { >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(1, parent_hash)); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 1)); assert_eq!( mmr_leaf, MmrLeaf { @@ -144,13 +143,11 @@ fn should_contain_valid_leaf_data() { beefy_next_authority_set: BeefyNextAuthoritySet { id: 3, len: 2, - root: array_bytes::hex_n_into_unchecked( - "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5" - ) + root: hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5") + .into(), }, - leaf_extra: array_bytes::hex2bytes_unchecked( - "55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648" - ) + leaf_extra: hex!("55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648") + .to_vec() } ); } @@ -164,9 +161,8 @@ fn should_update_authorities() { // check 
current authority set assert_eq!(0, auth_set.id); assert_eq!(2, auth_set.len); - let want = array_bytes::hex_n_into_unchecked::( - "176e73f1bf656478b728e28dd1a7733c98621b8acf830bff585949763dca7a96", - ); + let want: H256 = + hex!("176e73f1bf656478b728e28dd1a7733c98621b8acf830bff585949763dca7a96").into(); assert_eq!(want, auth_set.root); // next authority set should have same validators but different id @@ -184,9 +180,8 @@ fn should_update_authorities() { assert_eq!(1, auth_set.id); // check next auth set assert_eq!(2, next_auth_set.id); - let want = array_bytes::hex_n_into_unchecked::( - "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5", - ); + let want: H256 = + hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5").into(); assert_eq!(2, next_auth_set.len); assert_eq!(want, next_auth_set.root); @@ -200,9 +195,8 @@ fn should_update_authorities() { assert_eq!(2, auth_set.id); // check next auth set assert_eq!(3, next_auth_set.id); - let want = array_bytes::hex_n_into_unchecked::( - "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5", - ); + let want: H256 = + hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5").into(); assert_eq!(2, next_auth_set.len); assert_eq!(want, next_auth_set.root); }); diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml index 84aa8c7757c45..eecce963d19f0 100644 --- a/frame/beefy/Cargo.toml +++ b/frame/beefy/Cargo.toml @@ -37,4 +37,3 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index 305b158124b67..fce531d3f5dd7 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -159,8 +159,7 @@ impl Pallet { } let bounded_authorities = - BoundedSlice::::try_from(authorities.as_slice()) - .map_err(|_| ())?; + BoundedSlice::::try_from(authorities.as_slice())?; let id = 0; >::put(bounded_authorities); diff --git a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs 
index ad3a672333dd5..3bb59c7c39485 100644 --- a/frame/beefy/src/mock.rs +++ b/frame/beefy/src/mock.rs @@ -62,16 +62,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -95,7 +95,7 @@ parameter_types! { } impl pallet_session::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 61aa2b9b900c6..4205274b5dbc3 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -23,7 +23,6 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../../primitives/runtime-interface" } @@ -31,8 +30,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives sp-storage = 
{ version = "6.0.0", default-features = false, path = "../../primitives/storage" } [dev-dependencies] -array-bytes = "4.1" -rusty-fork = { version = "0.3.0", default-features = false } +hex-literal = "0.3.4" sp-keystore = { version = "0.12.0", path = "../../primitives/keystore" } [features] @@ -47,13 +45,10 @@ std = [ "serde", "sp-api/std", "sp-application-crypto/std", - "sp-core/std", "sp-io/std", "sp-runtime-interface/std", "sp-runtime/std", "sp-std/std", "sp-storage/std", ] -runtime-benchmarks = [ - "frame-system/runtime-benchmarks", -] +runtime-benchmarks = [] diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index 76673c5f69b33..f0fe05cc140f2 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -125,8 +125,6 @@ cargo test -p pallet-balances --features runtime-benchmarks > ``` > To solve this, navigate to the folder of the node (`cd bin/node/cli`) or pallet (`cd frame/pallet`) and run the command there. -This will instance each linear component with different values. The number of values per component is set to six and can be changed with the `VALUES_PER_COMPONENT` environment variable. - ## Adding Benchmarks The benchmarks included with each pallet are not automatically added to your node. To actually @@ -185,7 +183,7 @@ Then you can run a benchmark like so: ``` This will output a file `pallet_name.rs` which implements the `WeightInfo` trait you should include -in your pallet. Double colons `::` will be replaced with a `_` in the output name if you specify a directory. Each blockchain should generate their own benchmark file with their custom +in your pallet. Each blockchain should generate their own benchmark file with their custom implementation of the `WeightInfo` trait. This means that you will be able to use these modular Substrate pallets while still keeping your network safe for your specific configuration and requirements. 
diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 0b77a92347d03..c19af781234f8 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -18,16 +18,17 @@ //! Tools for analyzing the benchmark results. use crate::BenchmarkResult; +use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; use std::collections::BTreeMap; +pub use linregress::RegressionModel; + pub struct Analysis { pub base: u128, pub slopes: Vec, pub names: Vec, pub value_dists: Option, u128, u128)>>, - pub errors: Option>, - pub minimum: u128, - selector: BenchmarkSelector, + pub model: Option, } #[derive(Clone, Copy)] @@ -39,65 +40,6 @@ pub enum BenchmarkSelector { ProofSize, } -/// Multiplies the value by 1000 and converts it into an u128. -fn mul_1000_into_u128(value: f64) -> u128 { - // This is slighly more precise than the alternative of `(value * 1000.0) as u128`. - (value as u128) - .saturating_mul(1000) - .saturating_add((value.fract() * 1000.0) as u128) -} - -impl BenchmarkSelector { - fn scale_and_cast_weight(self, value: f64, round_up: bool) -> u128 { - if let BenchmarkSelector::ExtrinsicTime = self { - // We add a very slight bias here to counteract the numerical imprecision of the linear - // regression where due to rounding issues it can emit a number like `2999999.999999998` - // which we most certainly always want to round up instead of truncating. 
- mul_1000_into_u128(value + 0.000_000_005) - } else { - if round_up { - (value + 0.5) as u128 - } else { - value as u128 - } - } - } - - fn scale_weight(self, value: u128) -> u128 { - if let BenchmarkSelector::ExtrinsicTime = self { - value.saturating_mul(1000) - } else { - value - } - } - - fn nanos_from_weight(self, value: u128) -> u128 { - if let BenchmarkSelector::ExtrinsicTime = self { - value / 1000 - } else { - value - } - } - - fn get_value(self, result: &BenchmarkResult) -> u128 { - match self { - BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, - BenchmarkSelector::StorageRootTime => result.storage_root_time, - BenchmarkSelector::Reads => result.reads.into(), - BenchmarkSelector::Writes => result.writes.into(), - BenchmarkSelector::ProofSize => result.proof_size.into(), - } - } - - fn get_minimum(self, results: &[BenchmarkResult]) -> u128 { - results - .iter() - .map(|result| self.get_value(result)) - .min() - .expect("results cannot be empty") - } -} - #[derive(Debug)] pub enum AnalysisChoice { /// Use minimum squares regression for analyzing the benchmarking results. @@ -130,70 +72,6 @@ impl TryFrom> for AnalysisChoice { } } -fn raw_linear_regression( - xs: &[f64], - ys: &[f64], - x_vars: usize, - with_intercept: bool, -) -> Option<(f64, Vec, Vec)> { - let mut data: Vec = Vec::new(); - - // Here we build a raw matrix of linear equations for the `linregress` crate to solve for us - // and build a linear regression model around it. - // - // Each row of the matrix contains as the first column the actual value which we want - // the model to predict for us (the `y`), and the rest of the columns contain the input - // parameters on which the model will base its predictions on (the `xs`). - // - // In machine learning terms this is essentially the training data for the model. - // - // As a special case the very first input parameter represents the constant factor - // of the linear equation: the so called "intercept value". 
Since it's supposed to - // be constant we can just put a dummy input parameter of either a `1` (in case we want it) - // or a `0` (in case we do not). - for (&y, xs) in ys.iter().zip(xs.chunks_exact(x_vars)) { - data.push(y); - if with_intercept { - data.push(1.0); - } else { - data.push(0.0); - } - data.extend(xs); - } - let model = linregress::fit_low_level_regression_model(&data, ys.len(), x_vars + 2).ok()?; - Some((model.parameters[0], model.parameters[1..].to_vec(), model.se)) -} - -fn linear_regression( - xs: Vec, - mut ys: Vec, - x_vars: usize, -) -> Option<(f64, Vec, Vec)> { - let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, true)?; - if intercept >= -0.0001 { - // The intercept is positive, or is effectively zero. - return Some((intercept, params, errors[1..].to_vec())) - } - - // The intercept is negative. - // The weights must be always positive, so we can't have that. - - let mut min = ys[0]; - for &value in &ys { - if value < min { - min = value; - } - } - - for value in &mut ys { - *value -= min; - } - - let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, false)?; - assert!(intercept.abs() <= 0.0001); - Some((min, params, errors[1..].to_vec())) -} - impl Analysis { // Useful for when there are no components, and we just need an median value of the benchmark // results. Note: We choose the median value because it is more robust to outliers. 
@@ -213,17 +91,15 @@ impl Analysis { }) .collect(); - values.sort(); + values.sort_unstable(); let mid = values.len() / 2; Some(Self { - base: selector.scale_weight(values[mid]), + base: values[mid], slopes: Vec::new(), names: Vec::new(), value_dists: None, - errors: None, - minimum: selector.get_minimum(&r), - selector, + model: None, }) } @@ -310,20 +186,15 @@ impl Analysis { }) .collect::>(); - let base = selector.scale_and_cast_weight(models[0].0.max(0f64), false); - let slopes = models - .iter() - .map(|x| selector.scale_and_cast_weight(x.1.max(0f64), false)) - .collect::>(); + let base = models[0].0.max(0f64) as u128; + let slopes = models.iter().map(|x| x.1.max(0f64) as u128).collect::>(); Some(Self { base, slopes, names: results.into_iter().map(|x| x.0).collect::>(), value_dists: None, - errors: None, - minimum: selector.get_minimum(&r), - selector, + model: None, }) } @@ -345,12 +216,41 @@ impl Analysis { } for (_, rs) in results.iter_mut() { - rs.sort(); + rs.sort_unstable(); let ql = rs.len() / 4; *rs = rs[ql..rs.len() - ql].to_vec(); } + let mut data = + vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; + let names = r[0].components.iter().map(|x| format!("{:?}", x.0)).collect::>(); + data.extend(names.iter().enumerate().map(|(i, p)| { + ( + p.as_str(), + results + .iter() + .flat_map(|x| Some(x.0[i] as f64).into_iter().cycle().take(x.1.len())) + .collect::>(), + ) + })); + + let data = RegressionDataBuilder::new().build_from(data).ok()?; + + let model = FormulaRegressionBuilder::new() + .data(&data) + .formula(format!("Y ~ {}", names.join(" + "))) + .fit() + .ok()?; + + let slopes = model + .parameters + .regressor_values + .iter() + .enumerate() + .map(|(_, x)| (*x + 0.5) as u128) + .collect(); + let value_dists = results .iter() .map(|(p, vs)| { @@ -369,34 +269,12 @@ impl Analysis { }) .collect::>(); - let mut ys: Vec = Vec::new(); - let mut xs: Vec = Vec::new(); - for result in results { - let x: Vec = 
result.0.iter().map(|value| *value as f64).collect(); - for y in result.1 { - xs.extend(x.iter().copied()); - ys.push(y as f64); - } - } - - let (intercept, slopes, errors) = linear_regression(xs, ys, r[0].components.len())?; - Some(Self { - base: selector.scale_and_cast_weight(intercept, true), - slopes: slopes - .into_iter() - .map(|value| selector.scale_and_cast_weight(value, true)) - .collect(), + base: (model.parameters.intercept_value + 0.5) as u128, + slopes, names, value_dists: Some(value_dists), - errors: Some( - errors - .into_iter() - .map(|value| selector.scale_and_cast_weight(value, false)) - .collect(), - ), - minimum: selector.get_minimum(&r), - selector, + model: Some(model), }) } @@ -426,10 +304,9 @@ impl Analysis { .for_each(|(a, b)| assert!(a == b, "benchmark results not in the same order")); let names = median_slopes.names; let value_dists = min_squares.value_dists; - let errors = min_squares.errors; - let minimum = selector.get_minimum(&r); + let model = min_squares.model; - Some(Self { base, slopes, names, value_dists, errors, selector, minimum }) + Some(Self { base, slopes, names, value_dists, model }) } } @@ -486,19 +363,18 @@ impl std::fmt::Display for Analysis { } } } - - if let Some(ref errors) = self.errors { + if let Some(ref model) = self.model { writeln!(f, "\nQuality and confidence:")?; writeln!(f, "param error")?; - for (p, se) in self.names.iter().zip(errors.iter()) { - writeln!(f, "{} {:>8}", p, ms(self.selector.nanos_from_weight(*se)))?; + for (p, se) in self.names.iter().zip(model.se.regressor_values.iter()) { + writeln!(f, "{} {:>8}", p, ms(*se as u128))?; } } writeln!(f, "\nModel:")?; - writeln!(f, "Time ~= {:>8}", ms(self.selector.nanos_from_weight(self.base)))?; + writeln!(f, "Time ~= {:>8}", ms(self.base))?; for (&t, n) in self.slopes.iter().zip(self.names.iter()) { - writeln!(f, " + {} {:>8}", n, ms(self.selector.nanos_from_weight(t)))?; + writeln!(f, " + {} {:>8}", n, ms(t))?; } writeln!(f, " µs") } @@ -539,52 +415,6 @@ 
mod tests { } } - #[test] - fn test_linear_regression() { - let ys = vec![ - 3797981.0, - 37857779.0, - 70569402.0, - 104004114.0, - 137233924.0, - 169826237.0, - 203521133.0, - 237552333.0, - 271082065.0, - 305554637.0, - 335218347.0, - 371759065.0, - 405086197.0, - 438353555.0, - 472891417.0, - 505339532.0, - 527784778.0, - 562590596.0, - 635291991.0, - 673027090.0, - 708119408.0, - ]; - let xs = vec![ - 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, - 16.0, 17.0, 18.0, 19.0, 20.0, - ]; - - let (intercept, params, errors) = raw_linear_regression(&xs, &ys, 1, true).unwrap(); - assert_eq!(intercept as i64, -2712997); - assert_eq!(params.len(), 1); - assert_eq!(params[0] as i64, 34444926); - assert_eq!(errors.len(), 2); - assert_eq!(errors[0] as i64, 4805766); - assert_eq!(errors[1] as i64, 411084); - - let (intercept, params, errors) = linear_regression(xs, ys, 1).unwrap(); - assert_eq!(intercept as i64, 3797981); - assert_eq!(params.len(), 1); - assert_eq!(params[0] as i64, 33968513); - assert_eq!(errors.len(), 1); - assert_eq!(errors[0] as i64, 217331); - } - #[test] fn analysis_median_slopes_should_work() { let data = vec![ @@ -648,8 +478,8 @@ mod tests { let extrinsic_time = Analysis::median_slopes(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 10_000_000_000); - assert_eq!(extrinsic_time.slopes, vec![1_000_000_000, 100_000_000]); + assert_eq!(extrinsic_time.base, 10_000_000); + assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); let reads = Analysis::median_slopes(&data, BenchmarkSelector::Reads).unwrap(); assert_eq!(reads.base, 2); @@ -723,8 +553,8 @@ mod tests { let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 10_000_000_000); - assert_eq!(extrinsic_time.slopes, vec![1000000000, 100000000]); + assert_eq!(extrinsic_time.base, 10_000_000); + assert_eq!(extrinsic_time.slopes, vec![1_000_000, 
100_000]); let reads = Analysis::min_squares_iqr(&data, BenchmarkSelector::Reads).unwrap(); assert_eq!(reads.base, 2); @@ -734,36 +564,4 @@ mod tests { assert_eq!(writes.base, 0); assert_eq!(writes.slopes, vec![0, 2]); } - - #[test] - fn analysis_min_squares_iqr_uses_multiple_samples_for_same_parameters() { - let data = vec![ - benchmark_result(vec![(BenchmarkParameter::n, 0)], 2_000_000, 0, 0, 0), - benchmark_result(vec![(BenchmarkParameter::n, 0)], 4_000_000, 0, 0, 0), - benchmark_result(vec![(BenchmarkParameter::n, 1)], 4_000_000, 0, 0, 0), - benchmark_result(vec![(BenchmarkParameter::n, 1)], 8_000_000, 0, 0, 0), - ]; - - let extrinsic_time = - Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 3_000_000_000); - assert_eq!(extrinsic_time.slopes, vec![3_000_000_000]); - } - - #[test] - fn intercept_of_a_little_under_zero_is_rounded_up_to_zero() { - // Analytically this should result in an intercept of 0, but - // due to numerical imprecision this will generate an intercept - // equal to roughly -0.0000000000000004440892098500626 - let data = vec![ - benchmark_result(vec![(BenchmarkParameter::n, 1)], 2, 0, 0, 0), - benchmark_result(vec![(BenchmarkParameter::n, 2)], 4, 0, 0, 0), - benchmark_result(vec![(BenchmarkParameter::n, 3)], 6, 0, 0, 0), - ]; - - let extrinsic_time = - Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 0); - assert_eq!(extrinsic_time.slopes, vec![2000]); - } } diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs index 5fd845551daca..1ceb9a4f8904c 100644 --- a/frame/benchmarking/src/baseline.rs +++ b/frame/benchmarking/src/baseline.rs @@ -90,7 +90,7 @@ benchmarks! { } sr25519_verification { - let i in 0 .. 100; + let i in 1 .. 
100; let public = SignerId::generate_pair(None); @@ -176,16 +176,16 @@ pub mod mock { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index a221eccb82c85..afd53915cc397 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -30,7 +30,7 @@ mod utils; pub mod baseline; #[cfg(feature = "std")] -pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector}; +pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector, RegressionModel}; #[doc(hidden)] pub use frame_support; #[doc(hidden)] @@ -38,8 +38,6 @@ pub use log; #[doc(hidden)] pub use paste; #[doc(hidden)] -pub use sp_core::defer; -#[doc(hidden)] pub use sp_io::storage::root as storage_root; #[doc(hidden)] pub use sp_runtime::traits::Zero; @@ -86,12 +84,12 @@ macro_rules! whitelist { /// ``` /// /// Note that due to parsing restrictions, if the `from` expression is not a single token (i.e. a -/// literal or constant), then it must be parenthesized. +/// literal or constant), then it must be parenthesised. /// /// The macro allows for a number of "arms", each representing an individual benchmark. Using the /// simple syntax, the associated dispatchable function maps 1:1 with the benchmark and the name of /// the benchmark is the same as that of the associated function. 
However, extended syntax allows -/// for arbitrary expressions to be evaluated in a benchmark (including for example, +/// for arbitrary expresions to be evaluated in a benchmark (including for example, /// `on_initialize`). /// /// Note that the ranges are *inclusive* on both sides. This is in contrast to ranges in Rust which @@ -112,14 +110,14 @@ macro_rules! whitelist { /// foo { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let l in 1 .. MAX_LENGTH => initialize_l(l); -/// }: _(RuntimeOrigin::Signed(caller), vec![0u8; l]) +/// }: _(Origin::Signed(caller), vec![0u8; l]) /// /// // second dispatchable: bar; this is a root dispatchable and accepts a `u8` vector of size /// // `l`. /// // In this case, we explicitly name the call using `bar` instead of `_`. /// bar { /// let l in 1 .. MAX_LENGTH => initialize_l(l); -/// }: bar(RuntimeOrigin::Root, vec![0u8; l]) +/// }: bar(Origin::Root, vec![0u8; l]) /// /// // third dispatchable: baz; this is a user dispatchable. It isn't dependent on length like the /// // other two but has its own complexity `c` that needs setting up. It uses `caller` (in the @@ -128,20 +126,20 @@ macro_rules! whitelist { /// baz1 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let c = 0 .. 10 => setup_c(&caller, c); -/// }: baz(RuntimeOrigin::Signed(caller)) +/// }: baz(Origin::Signed(caller)) /// /// // this is a second benchmark of the baz dispatchable with a different setup. /// baz2 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let c = 0 .. 10 => setup_c_in_some_other_way(&caller, c); -/// }: baz(RuntimeOrigin::Signed(caller)) +/// }: baz(Origin::Signed(caller)) /// /// // You may optionally specify the origin type if it can't be determined automatically like /// // this. /// baz3 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let l in 1 .. 
MAX_LENGTH => initialize_l(l); -/// }: baz(RuntimeOrigin::Signed(caller), vec![0u8; l]) +/// }: baz(Origin::Signed(caller), vec![0u8; l]) /// /// // this is benchmarking some code that is not a dispatchable. /// populate_a_set { @@ -547,7 +545,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: _ $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { @@ -557,7 +555,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: _ $(<$origin_type>)? ( $origin $( , $arg )* ) + $name { $( $code )* }: _ ( $origin $( , $arg )* ) verify { } $( $rest )* } @@ -570,7 +568,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $dispatch:ident $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { @@ -580,7 +578,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $dispatch $(<$origin_type>)? ( $origin $( , $arg )* ) + $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) verify { } $( $rest )* } @@ -593,7 +591,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $(<$origin_type:ty>)? $eval:block + $name:ident { $( $code:tt )* }: $eval:block $( $rest:tt )* ) => { $crate::benchmarks_iter!( @@ -603,7 +601,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $(<$origin_type>)? 
$eval + $name { $( $code )* }: $eval verify { } $( $rest )* ); @@ -617,7 +615,7 @@ macro_rules! to_origin { $origin.into() }; ($origin:expr, $origin_type:ty) => { - <::RuntimeOrigin as From<$origin_type>>::from($origin) + >::from($origin) }; } @@ -975,8 +973,6 @@ macro_rules! impl_benchmark { ( $( $name_extra:ident ),* ) ( $( $name_skip_meta:ident ),* ) ) => { - // We only need to implement benchmarks for the runtime-benchmarks feature or testing. - #[cfg(any(feature = "runtime-benchmarks", test))] impl, $instance: $instance_bound )? > $crate::Benchmarking for Pallet where T: frame_system::Config, $( $where_clause )* @@ -1037,9 +1033,6 @@ macro_rules! impl_benchmark { // Always do at least one internal repeat... for _ in 0 .. internal_repeats.max(1) { - // Always reset the state after the benchmark. - $crate::defer!($crate::benchmarking::wipe_db()); - // Set up the externalities environment for the setup we want to // benchmark. let closure_to_benchmark = < @@ -1061,9 +1054,7 @@ macro_rules! impl_benchmark { // Time the extrinsic logic. $crate::log::trace!( target: "benchmark", - "Start Benchmark: {} ({:?})", - extrinsic, - c + "Start Benchmark: {:?}", c ); let start_pov = $crate::benchmarking::proof_size(); @@ -1117,6 +1108,9 @@ macro_rules! impl_benchmark { proof_size: diff_pov, keys: read_and_written_keys, }); + + // Wipe the DB back to the genesis state. + $crate::benchmarking::wipe_db(); } return Ok(results); @@ -1155,8 +1149,6 @@ macro_rules! impl_benchmark { // This creates a unit test for one benchmark of the main benchmark macro. // It runs the benchmark using the `high` and `low` value for each component // and ensure that everything completes successfully. -// Instances each component with six values which can be controlled with the -// env variable `VALUES_PER_COMPONENT`. #[macro_export] #[doc(hidden)] macro_rules! impl_benchmark_test { @@ -1181,9 +1173,6 @@ macro_rules! 
impl_benchmark_test { let execute_benchmark = | c: $crate::Vec<($crate::BenchmarkParameter, u32)> | -> Result<(), $crate::BenchmarkError> { - // Always reset the state after the benchmark. - $crate::defer!($crate::benchmarking::wipe_db()); - // Set up the benchmark, return execution + verification function. let closure_to_verify = < SelectedBenchmark as $crate::BenchmarkingSetup @@ -1195,48 +1184,27 @@ macro_rules! impl_benchmark_test { } // Run execution + verification - closure_to_verify() + closure_to_verify()?; + + // Reset the state + $crate::benchmarking::wipe_db(); + + Ok(()) }; if components.is_empty() { execute_benchmark(Default::default())?; } else { - let num_values: u32 = if let Ok(ev) = std::env::var("VALUES_PER_COMPONENT") { - ev.parse().map_err(|_| { - $crate::BenchmarkError::Stop( - "Could not parse env var `VALUES_PER_COMPONENT` as u32." - ) - })? - } else { - 6 - }; - - if num_values < 2 { - return Err("`VALUES_PER_COMPONENT` must be at least 2".into()); - } - - for (name, low, high) in components.clone().into_iter() { - // Test the lowest, highest (if its different from the lowest) - // and up to num_values-2 more equidistant values in between. - // For 0..10 and num_values=6 this would mean: [0, 2, 4, 6, 8, 10] - - let mut values = $crate::vec![low]; - let diff = (high - low).min(num_values - 1); - let slope = (high - low) as f32 / diff as f32; - - for i in 1..=diff { - let value = ((low as f32 + slope * i as f32) as u32) - .clamp(low, high); - values.push(value); - } - - for component_value in values { + for (name, low, high) in components.iter() { + // Test only the low and high value, assuming values in the middle + // won't break + for component_value in $crate::vec![low, high] { // Select the max value for all the other components. 
let c: $crate::Vec<($crate::BenchmarkParameter, u32)> = components .iter() .map(|(n, _, h)| - if *n == name { - (*n, component_value) + if n == name { + (*n, *component_value) } else { (*n, *h) } @@ -1695,13 +1663,13 @@ pub fn show_benchmark_debug_info( /// use frame_benchmarking::TrackedStorageKey; /// let whitelist: Vec = vec![ /// // Block Number -/// array_bytes::hex_into_unchecked("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac"), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), /// // Total Issuance -/// array_bytes::hex_into_unchecked("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80"), +/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), /// // Execution Phase -/// array_bytes::hex_into_unchecked("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a"), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), /// // Event Count -/// array_bytes::hex_into_unchecked("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850"), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), /// ]; /// ``` /// diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 88a7d6d0286b2..06f2b5bdc4916 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -27,7 +27,6 @@ use sp_runtime::{ BuildStorage, }; use sp_std::prelude::*; -use std::cell::RefCell; #[frame_support::pallet] mod pallet_test { @@ -36,7 +35,7 @@ mod pallet_test { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(PhantomData); + pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config { @@ -46,21 +45,21 @@ mod pallet_test { } #[pallet::storage] - #[pallet::getter(fn value)] + #[pallet::getter(fn heartbeat_after)] pub(crate) type 
Value = StorageValue<_, u32, OptionQuery>; #[pallet::call] impl Pallet { #[pallet::weight(0)] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { - let _sender = ensure_signed(origin)?; + let _sender = frame_system::ensure_signed(origin)?; Value::::put(n); Ok(()) } #[pallet::weight(0)] pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { - let _sender = ensure_none(origin)?; + let _sender = frame_system::ensure_none(origin)?; Ok(()) } @@ -90,16 +89,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -126,20 +125,11 @@ fn new_test_ext() -> sp_io::TestExternalities { GenesisConfig::default().build_storage().unwrap().into() } -thread_local! { - /// Tracks the used components per value. Needs to be a thread local since the - /// benchmarking clears the storage after each run. - static VALUES_PER_COMPONENT: RefCell> = RefCell::new(vec![]); -} - -// NOTE: This attribute is only needed for the `modify_in_` functions. -#[allow(unreachable_code)] mod benchmarks { - use super::{new_test_ext, pallet_test::Value, Test, VALUES_PER_COMPONENT}; + use super::{new_test_ext, pallet_test::Value, Test}; use crate::{account, BenchmarkError, BenchmarkParameter, BenchmarkResult, BenchmarkingSetup}; use frame_support::{assert_err, assert_ok, ensure, traits::Get}; use frame_system::RawOrigin; - use rusty_fork::rusty_fork_test; use sp_std::prelude::*; // Additional used internally by the benchmark macro. @@ -148,7 +138,7 @@ mod benchmarks { crate::benchmarks! 
{ where_clause { where - crate::tests::RuntimeOrigin: From::AccountId>>, + crate::tests::Origin: From::AccountId>>, } set_value { @@ -237,31 +227,6 @@ mod benchmarks { // This should never be reached. assert!(value > 100); } - - modify_in_setup_then_error { - Value::::set(Some(123)); - return Err(BenchmarkError::Stop("Should error")); - }: { } - - modify_in_call_then_error { - }: { - Value::::set(Some(123)); - return Err(BenchmarkError::Stop("Should error")); - } - - modify_in_verify_then_error { - }: { - } verify { - Value::::set(Some(123)); - return Err(BenchmarkError::Stop("Should error")); - } - - // Stores all component values in the thread-local storage. - values_per_component { - let n in 0 .. 10; - }: { - VALUES_PER_COMPONENT.with(|v| v.borrow_mut().push(n)); - } } #[test] @@ -385,70 +350,4 @@ mod benchmarks { assert_eq!(Pallet::::test_benchmark_skip_benchmark(), Err(BenchmarkError::Skip),); }); } - - /// An error return of a benchmark test function still causes the db to be wiped. - #[test] - fn benchmark_error_wipes_storage() { - new_test_ext().execute_with(|| { - // It resets when the error happens in the setup: - assert_err!( - Pallet::::test_benchmark_modify_in_setup_then_error(), - "Should error" - ); - assert_eq!(Value::::get(), None); - - // It resets when the error happens in the call: - assert_err!(Pallet::::test_benchmark_modify_in_call_then_error(), "Should error"); - assert_eq!(Value::::get(), None); - - // It resets when the error happens in the verify: - assert_err!( - Pallet::::test_benchmark_modify_in_verify_then_error(), - "Should error" - ); - assert_eq!(Value::::get(), None); - }); - } - - rusty_fork_test! { - /// Test that the benchmarking uses the correct values for each component and - /// that the number of components can be controlled with `VALUES_PER_COMPONENT`. - /// - /// NOTE: This test needs to run in its own process, since it - /// otherwise messes up the env variable for the other tests. 
- #[test] - fn test_values_per_component() { - let tests = vec![ - (Some("1"), Err("`VALUES_PER_COMPONENT` must be at least 2".into())), - (Some("asdf"), Err("Could not parse env var `VALUES_PER_COMPONENT` as u32.".into())), - (None, Ok(vec![0, 2, 4, 6, 8, 10])), - (Some("2"), Ok(vec![0, 10])), - (Some("4"), Ok(vec![0, 3, 6, 10])), - (Some("6"), Ok(vec![0, 2, 4, 6, 8, 10])), - (Some("10"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10])), - (Some("11"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), - (Some("99"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), - ]; - - for (num, expected) in tests { - run_test_values_per_component(num, expected); - } - } - } - - /// Helper for [`test_values_per_component`]. - fn run_test_values_per_component(num: Option<&str>, output: Result, BenchmarkError>) { - VALUES_PER_COMPONENT.with(|v| v.borrow_mut().clear()); - match num { - Some(n) => std::env::set_var("VALUES_PER_COMPONENT", n), - None => std::env::remove_var("VALUES_PER_COMPONENT"), - } - - new_test_ext().execute_with(|| { - let got = Pallet::::test_benchmark_values_per_component() - .map(|_| VALUES_PER_COMPONENT.with(|v| v.borrow().clone())); - - assert_eq!(got, output); - }); - } } diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index 7e1cd48840687..ef8351d37e957 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -28,51 +28,47 @@ use sp_runtime::{ }; use sp_std::prelude::*; -#[frame_support::pallet] mod pallet_test { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use frame_support::pallet_prelude::Get; - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(PhantomData<(T, I)>); - - pub trait OtherConfig { - type OtherEvent; + frame_support::decl_storage! 
{ + trait Store for Module, I: Instance = DefaultInstance> as Test where + ::OtherEvent: Into<>::Event> + { + pub Value get(fn value): Option; + } } - #[pallet::config] - pub trait Config: frame_system::Config + OtherConfig { - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; - type LowerBound: Get; - type UpperBound: Get; - } + frame_support::decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call where + origin: T::Origin, ::OtherEvent: Into<>::Event> + { + #[weight = 0] + fn set_value(origin, n: u32) -> frame_support::dispatch::DispatchResult { + let _sender = frame_system::ensure_signed(origin)?; + Value::::put(n); + Ok(()) + } - #[pallet::storage] - #[pallet::getter(fn value)] - pub(crate) type Value, I: 'static = ()> = StorageValue<_, u32, OptionQuery>; + #[weight = 0] + fn dummy(origin, _n: u32) -> frame_support::dispatch::DispatchResult { + let _sender = frame_system::ensure_none(origin)?; + Ok(()) + } + } + } - #[pallet::event] - pub enum Event, I: 'static = ()> {} + pub trait OtherConfig { + type OtherEvent; + } - #[pallet::call] - impl, I: 'static> Pallet + pub trait Config: frame_system::Config + OtherConfig where - ::OtherEvent: Into<>::RuntimeEvent>, + Self::OtherEvent: Into<>::Event>, { - #[pallet::weight(0)] - pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { - let _sender = ensure_signed(origin)?; - Value::::put(n); - Ok(()) - } - - #[pallet::weight(0)] - pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { - let _sender = ensure_none(origin)?; - Ok(()) - } + type Event; + type LowerBound: Get; + type UpperBound: Get; } } @@ -86,7 +82,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - TestPallet: pallet_test::{Pallet, Call, Storage, Event}, + TestPallet: pallet_test::{Pallet, Call, Storage}, } ); @@ -94,18 +90,18 @@ impl frame_system::Config for Test { type BaseCallFilter = 
frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type DbWeight = (); + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); - type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; type AccountData = (); @@ -114,17 +110,17 @@ impl frame_system::Config for Test { type SystemWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = ConstU32<16>; + type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_test::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type LowerBound = ConstU32<1>; type UpperBound = ConstU32<100>; } impl pallet_test::OtherConfig for Test { - type OtherEvent = RuntimeEvent; + type OtherEvent = Event; } fn new_test_ext() -> sp_io::TestExternalities { @@ -134,19 +130,20 @@ fn new_test_ext() -> sp_io::TestExternalities { mod benchmarks { use super::pallet_test::{self, Value}; use crate::account; - use frame_support::ensure; + use frame_support::{ensure, StorageValue}; use frame_system::RawOrigin; use sp_std::prelude::*; // Additional used internally by the benchmark macro. use super::pallet_test::{Call, Config, Pallet}; + use frame_support::traits::Instance; - crate::benchmarks_instance_pallet! { + crate::benchmarks_instance! 
{ where_clause { where ::OtherEvent: Clone - + Into<>::RuntimeEvent>, - >::RuntimeEvent: Clone, + + Into<>::Event>, + >::Event: Clone, } set_value { @@ -154,7 +151,7 @@ mod benchmarks { let caller = account::("caller", 0, 0); }: _ (RawOrigin::Signed(caller), b.into()) verify { - assert_eq!(Value::::get(), Some(b)); + assert_eq!(Value::::get(), Some(b)); } other_name { diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 753e8c1c684ee..8c642f74358db 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -23,14 +23,14 @@ use frame_support::{ traits::StorageInfo, }; #[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; +use serde::Serialize; use sp_io::hashing::blake2_256; use sp_runtime::traits::TrailingZeroInput; use sp_std::{prelude::Box, vec::Vec}; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Serialize))] #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] @@ -71,7 +71,7 @@ impl std::fmt::Display for BenchmarkParameter { } /// The results of a single of benchmark. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Serialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatch { /// The pallet containing this benchmark. @@ -89,7 +89,7 @@ pub struct BenchmarkBatch { // TODO: could probably make API cleaner here. /// The results of a single of benchmark, where time and db results are separated. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Serialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatchSplitResults { /// The pallet containing this benchmark. 
@@ -110,7 +110,7 @@ pub struct BenchmarkBatchSplitResults { /// Result from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Serialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] pub struct BenchmarkResult { pub components: Vec<(BenchmarkParameter, u32)>, @@ -121,13 +121,13 @@ pub struct BenchmarkResult { pub writes: u32, pub repeat_writes: u32, pub proof_size: u32, - #[cfg_attr(feature = "std", serde(skip))] + #[cfg_attr(feature = "std", serde(skip_serializing))] pub keys: Vec<(Vec, u32, u32, bool)>, } impl BenchmarkResult { pub fn from_weight(w: Weight) -> Self { - Self { extrinsic_time: (w.ref_time() / 1_000) as u128, ..Default::default() } + Self { extrinsic_time: (w as u128) / 1_000, ..Default::default() } } } @@ -141,14 +141,6 @@ mod serde_as_str { let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?; serializer.collect_str(s) } - - pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: serde::de::Deserializer<'de>, - { - let s: &str = serde::de::Deserialize::deserialize(deserializer)?; - Ok(s.into()) - } } /// Possible errors returned from the benchmarking pipeline. diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index 5e5a2e7ee343c..8b36601940cf3 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! 
Autogenerated weights for frame_benchmarking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/benchmarking/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/benchmarking/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -60,108 +57,76 @@ pub trait WeightInfo { /// Weights for frame_benchmarking using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// The range of component `i` is `[0, 1000000]`. fn addition(_i: u32, ) -> Weight { - // Minimum execution time: 108 nanoseconds. - Weight::from_ref_time(137_610 as u64) + (103_000 as Weight) } - /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { - // Minimum execution time: 104 nanoseconds. - Weight::from_ref_time(133_508 as u64) + (105_000 as Weight) } - /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { - // Minimum execution time: 110 nanoseconds. - Weight::from_ref_time(140_230 as u64) + (113_000 as Weight) } - /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { - // Minimum execution time: 96 nanoseconds. - Weight::from_ref_time(136_059 as u64) + (102_000 as Weight) } - /// The range of component `i` is `[0, 100]`. fn hashing(_i: u32, ) -> Weight { - // Minimum execution time: 21_804_747 nanoseconds. 
- Weight::from_ref_time(22_013_681_386 as u64) + (20_865_902_000 as Weight) } - /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { - // Minimum execution time: 136 nanoseconds. - Weight::from_ref_time(156_000 as u64) - // Standard Error: 4_531 - .saturating_add(Weight::from_ref_time(46_817_640 as u64).saturating_mul(i as u64)) + (319_000 as Weight) + // Standard Error: 8_000 + .saturating_add((47_171_000 as Weight).saturating_mul(i as Weight)) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. fn storage_read(i: u32, ) -> Weight { - // Minimum execution time: 125 nanoseconds. - Weight::from_ref_time(135_000 as u64) - // Standard Error: 3_651 - .saturating_add(Weight::from_ref_time(2_021_172 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 3_000 + .saturating_add((2_110_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. fn storage_write(i: u32, ) -> Weight { - // Minimum execution time: 120 nanoseconds. - Weight::from_ref_time(131_000 as u64) - // Standard Error: 348 - .saturating_add(Weight::from_ref_time(377_243 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 0 + .saturating_add((372_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } } // For backwards compatibility and tests impl WeightInfo for () { - /// The range of component `i` is `[0, 1000000]`. fn addition(_i: u32, ) -> Weight { - // Minimum execution time: 108 nanoseconds. 
- Weight::from_ref_time(137_610 as u64) + (103_000 as Weight) } - /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { - // Minimum execution time: 104 nanoseconds. - Weight::from_ref_time(133_508 as u64) + (105_000 as Weight) } - /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { - // Minimum execution time: 110 nanoseconds. - Weight::from_ref_time(140_230 as u64) + (113_000 as Weight) } - /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { - // Minimum execution time: 96 nanoseconds. - Weight::from_ref_time(136_059 as u64) + (102_000 as Weight) } - /// The range of component `i` is `[0, 100]`. fn hashing(_i: u32, ) -> Weight { - // Minimum execution time: 21_804_747 nanoseconds. - Weight::from_ref_time(22_013_681_386 as u64) + (20_865_902_000 as Weight) } - /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { - // Minimum execution time: 136 nanoseconds. - Weight::from_ref_time(156_000 as u64) - // Standard Error: 4_531 - .saturating_add(Weight::from_ref_time(46_817_640 as u64).saturating_mul(i as u64)) + (319_000 as Weight) + // Standard Error: 8_000 + .saturating_add((47_171_000 as Weight).saturating_mul(i as Weight)) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. fn storage_read(i: u32, ) -> Weight { - // Minimum execution time: 125 nanoseconds. - Weight::from_ref_time(135_000 as u64) - // Standard Error: 3_651 - .saturating_add(Weight::from_ref_time(2_021_172 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 3_000 + .saturating_add((2_110_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. 
fn storage_write(i: u32, ) -> Weight { - // Minimum execution time: 120 nanoseconds. - Weight::from_ref_time(131_000 as u64) - // Standard Error: 348 - .saturating_add(Weight::from_ref_time(377_243 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 0 + .saturating_add((372_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } } diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index 4aaf088abb5b6..645772fb27669 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -33,7 +33,6 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 07dd781c29af3..7566c32f6e9a1 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -37,8 +37,7 @@ fn create_approved_bounties, I: 'static>(n: u32) -> Result<(), &'st setup_bounty::(i, T::MaximumReasonLength::get()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - Bounties::::approve_bounty(approve_origin, bounty_id)?; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; } ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); Ok(()) @@ -62,16 +61,20 @@ fn setup_bounty, I: 'static>( } fn create_bounty, I: 'static>( -) -> Result<(AccountIdLookupOf, BountyIndex), &'static str> { +) -> Result<(::Source, BountyIndex), &'static str> { let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); let curator_lookup = T::Lookup::unlookup(curator.clone()); 
Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup.clone(), fee)?; + Bounties::::propose_curator( + RawOrigin::Root.into(), + bounty_id, + curator_lookup.clone(), + fee, + )?; Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; Ok((curator_lookup, bounty_id)) } @@ -82,7 +85,7 @@ fn setup_pot_account, I: 'static>() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -97,8 +100,7 @@ benchmarks_instance_pallet! { let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - }: _(approve_origin, bounty_id) + }: _(RawOrigin::Root, bounty_id) propose_curator { setup_pot_account::(); @@ -106,11 +108,9 @@ benchmarks_instance_pallet! 
{ let curator_lookup = T::Lookup::unlookup(curator); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - Bounties::::approve_bounty(approve_origin, bounty_id)?; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - let approve_origin = T::ApproveOrigin::successful_origin(); - }: _(approve_origin, bounty_id, curator_lookup, fee) + }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { @@ -128,10 +128,9 @@ benchmarks_instance_pallet! { let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup, fee)?; + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; }: _(RawOrigin::Signed(curator), bounty_id) award_bounty { @@ -170,16 +169,14 @@ benchmarks_instance_pallet! 
{ let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - }: close_bounty(approve_origin, bounty_id) + }: close_bounty(RawOrigin::Root, bounty_id) close_bounty_active { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; Treasury::::on_initialize(T::BlockNumber::zero()); let bounty_id = BountyCount::::get() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - }: close_bounty(approve_origin, bounty_id) + }: close_bounty(RawOrigin::Root, bounty_id) verify { assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) } @@ -197,7 +194,7 @@ benchmarks_instance_pallet! { } spend_funds { - let b in 0 .. 100; + let b in 1 .. 100; setup_pot_account::(); create_approved_bounties::(b)?; @@ -214,13 +211,9 @@ benchmarks_instance_pallet! { ); } verify { + ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); ensure!(missed_any == false, "Missed some"); - if b > 0 { - ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); - assert_last_event::(Event::BountyBecameActive { index: b - 1 }.into()) - } else { - ensure!(budget_remaining == BalanceOf::::max_value(), "Budget used"); - } + assert_last_event::(Event::BountyBecameActive { index: b - 1 }.into()) } impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index d947226f87fa0..fca758fd96b8e 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -114,8 +114,6 @@ type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf /// An index of a bounty. Just a `u32`. pub type BountyIndex = u32; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - /// A bounty proposal. 
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty { @@ -230,8 +228,7 @@ pub mod pallet { type DataDepositPerByte: Get>; /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Maximum acceptable reason length. /// @@ -384,7 +381,7 @@ pub mod pallet { pub fn propose_curator( origin: OriginFor, #[pallet::compact] bounty_id: BountyIndex, - curator: AccountIdLookupOf, + curator: ::Source, #[pallet::compact] fee: BalanceOf, ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; @@ -556,7 +553,7 @@ pub mod pallet { pub fn award_bounty( origin: OriginFor, #[pallet::compact] bounty_id: BountyIndex, - beneficiary: AccountIdLookupOf, + beneficiary: ::Source, ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -819,7 +816,7 @@ impl, I: 'static> Pallet { value: BalanceOf, ) -> DispatchResult { let bounded_description: BoundedVec<_, _> = - description.try_into().map_err(|_| Error::::ReasonTooBig)?; + description.try_into().map_err(|()| Error::::ReasonTooBig)?; ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); let index = Self::bounty_count(); diff --git a/frame/bounties/src/migrations/v4.rs b/frame/bounties/src/migrations/v4.rs index 2f81c97127bcd..8f5f3ebe55bf4 100644 --- a/frame/bounties/src/migrations/v4.rs +++ b/frame/bounties/src/migrations/v4.rs @@ -54,7 +54,7 @@ pub fn migrate< target: "runtime::bounties", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero() + return 0 } let on_chain_storage_version =

::on_chain_storage_version(); @@ -105,7 +105,7 @@ pub fn migrate< "Attempted to apply migration to v4 but failed because storage version is {:?}", on_chain_storage_version, ); - Weight::zero() + 0 } } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index 68aa56ccdde7f..b4ce039b35fbc 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -21,6 +21,7 @@ use super::*; use crate as pallet_bounties; +use std::cell::RefCell; use frame_support::{ assert_noop, assert_ok, @@ -68,16 +69,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -95,12 +96,15 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); pub static Burn: Permill = Permill::from_percent(50); @@ -113,7 +117,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -132,7 +136,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -155,7 +159,7 @@ parameter_types! { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BountyDepositBase = ConstU64<80>; type BountyDepositPayoutDelay = ConstU64<3>; type BountyUpdatePeriod = ConstU64<20>; @@ -170,7 +174,7 @@ impl Config for Test { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BountyDepositBase = ConstU64<80>; type BountyDepositPayoutDelay = ConstU64<3>; type BountyUpdatePeriod = ConstU64<20>; @@ -204,7 +208,7 @@ fn last_event() -> BountiesEvent { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::Bounties(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::Bounties(inner) = e { Some(inner) } else { None }) .last() .unwrap() } @@ -229,7 +233,7 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); assert_eq!(Balances::free_balance(0), 99); assert_eq!(Balances::reserved_balance(0), 1); }); @@ -238,7 +242,7 @@ fn 
spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); }); @@ -248,7 +252,7 @@ fn spend_proposal_takes_proportional_deposit() { fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3), + Treasury::propose_spend(Origin::signed(2), 100, 3), TreasuryError::InsufficientProposersBalance, ); }); @@ -259,8 +263,8 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -286,8 +290,8 @@ fn rejected_spend_proposal_ignored_on_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 0); @@ -300,12 +304,9 @@ fn reject_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 
0)); - assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), - TreasuryError::InvalidIndex - ); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); }); } @@ -313,7 +314,7 @@ fn reject_already_rejected_spend_proposal_fails() { fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), + Treasury::reject_proposal(Origin::root(), 0), pallet_treasury::Error::::InvalidIndex ); }); @@ -322,10 +323,7 @@ fn reject_non_existent_spend_proposal_fails() { #[test] fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), - TreasuryError::InvalidIndex - ); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); }); } @@ -334,12 +332,9 @@ fn accept_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); - assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), - TreasuryError::InvalidIndex - ); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); }); } @@ -349,8 +344,8 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + 
assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -364,8 +359,8 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -386,14 +381,14 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -416,10 +411,10 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); 
- assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -443,7 +438,7 @@ fn propose_bounty_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 10, b"1234567890".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); assert_eq!(last_event(), BountiesEvent::BountyProposed { index: 0 }); @@ -478,21 +473,17 @@ fn propose_bounty_validation_works() { assert_eq!(Treasury::pot(), 100); assert_noop!( - Bounties::propose_bounty(RuntimeOrigin::signed(1), 0, [0; 17_000].to_vec()), + Bounties::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), Error::::ReasonTooBig ); assert_noop!( - Bounties::propose_bounty( - RuntimeOrigin::signed(1), - 10, - b"12345678901234567890".to_vec() - ), + Bounties::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), Error::::InsufficientProposersBalance ); assert_noop!( - Bounties::propose_bounty(RuntimeOrigin::signed(1), 0, b"12345678901234567890".to_vec()), + Bounties::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), Error::::InvalidValue ); }); @@ -503,11 +494,11 @@ fn close_bounty_works() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Bounties::close_bounty(RuntimeOrigin::root(), 0), Error::::InvalidIndex); + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::InvalidIndex); - 
assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 10, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); - assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); let deposit: u64 = 80 + 5; @@ -528,14 +519,11 @@ fn approve_bounty_works() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!( - Bounties::approve_bounty(RuntimeOrigin::root(), 0), - Error::::InvalidIndex - ); + assert_noop!(Bounties::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); let deposit: u64 = 80 + 5; @@ -552,10 +540,7 @@ fn approve_bounty_works() { ); assert_eq!(Bounties::bounty_approvals(), vec![0]); - assert_noop!( - Bounties::close_bounty(RuntimeOrigin::root(), 0), - Error::::UnexpectedStatus - ); + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); // deposit not returned yet assert_eq!(Balances::reserved_balance(0), deposit); @@ -591,24 +576,24 @@ fn assign_curator_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!( - Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 4), + Bounties::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex ); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); assert_noop!( - 
Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 50), + Bounties::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee ); let fee = 4; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -622,18 +607,15 @@ fn assign_curator_works() { } ); + assert_noop!(Bounties::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); assert_noop!( - Bounties::accept_curator(RuntimeOrigin::signed(1), 0), - Error::::RequireCurator - ); - assert_noop!( - Bounties::accept_curator(RuntimeOrigin::signed(4), 0), + Bounties::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance ); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); let expected_deposit = Bounties::calculate_curator_deposit(&fee); @@ -659,18 +641,18 @@ fn unassign_curator_works() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 4; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); - assert_noop!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0), BadOrigin); - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); + assert_noop!(Bounties::unassign_curator(Origin::signed(1), 0), BadOrigin); + assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); assert_eq!( 
Bounties::bounties(0).unwrap(), @@ -684,11 +666,11 @@ fn unassign_curator_works() { } ); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); let expected_deposit = Bounties::calculate_curator_deposit(&fee); - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -713,26 +695,26 @@ fn award_and_claim_bounty_works() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 4; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); let expected_deposit = Bounties::calculate_curator_deposit(&fee); assert_eq!(Balances::free_balance(4), 10 - expected_deposit); assert_noop!( - Bounties::award_bounty(RuntimeOrigin::signed(1), 0, 3), + Bounties::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator ); - assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3)); + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -746,18 +728,14 @@ fn award_and_claim_bounty_works() { } ); - 
assert_noop!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0), Error::::Premature); + assert_noop!(Bounties::claim_bounty(Origin::signed(1), 0), Error::::Premature); System::set_block_number(5); >::on_initialize(5); - assert_ok!(Balances::transfer( - RuntimeOrigin::signed(0), - Bounties::bounty_account_id(0), - 10 - )); + assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); - assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0)); + assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); assert_eq!( last_event(), @@ -780,17 +758,17 @@ fn claim_handles_high_fee() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 30); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 49)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 49)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); - assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3)); + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); System::set_block_number(5); >::on_initialize(5); @@ -799,7 +777,7 @@ fn claim_handles_high_fee() { let res = Balances::slash(&Bounties::bounty_account_id(0), 10); assert_eq!(res.0.peek(), 10); - assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0)); + assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); assert_eq!( last_event(), @@ -822,18 +800,14 @@ fn cancel_and_refund() { Balances::make_free_balance_be(&Treasury::account_id(), 101); - 
assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Balances::transfer( - RuntimeOrigin::signed(0), - Bounties::bounty_account_id(0), - 10 - )); + assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -849,9 +823,9 @@ fn cancel_and_refund() { assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 60); - assert_noop!(Bounties::close_bounty(RuntimeOrigin::signed(0), 0), BadOrigin); + assert_noop!(Bounties::close_bounty(Origin::signed(0), 0), BadOrigin); - assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); // `- 25 + 10` assert_eq!(Treasury::pot(), 85); @@ -863,30 +837,27 @@ fn award_and_cancel() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 0, 10)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(0), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 0, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(0), 0)); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); - assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(0), 0, 3)); + 
assert_ok!(Bounties::award_bounty(Origin::signed(0), 0, 3)); // Cannot close bounty directly when payout is happening... - assert_noop!( - Bounties::close_bounty(RuntimeOrigin::root(), 0), - Error::::PendingPayout - ); + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::PendingPayout); // Instead unassign the curator to slash them and then close. - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); - assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); assert_eq!(last_event(), BountiesEvent::BountyCanceled { index: 0 }); @@ -906,15 +877,15 @@ fn expire_and_unassign() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 1, 10)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(1), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 5); @@ -922,15 +893,12 @@ fn expire_and_unassign() { System::set_block_number(22); >::on_initialize(22); - assert_noop!( - Bounties::unassign_curator(RuntimeOrigin::signed(0), 0), - Error::::Premature - ); + assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); System::set_block_number(23); >::on_initialize(23); - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(0), 0)); + 
assert_ok!(Bounties::unassign_curator(Origin::signed(0), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -955,20 +923,20 @@ fn extend_expiry() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); assert_noop!( - Bounties::extend_bounty_expiry(RuntimeOrigin::signed(1), 0, Vec::new()), + Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus ); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 10)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); assert_eq!(Balances::free_balance(4), 5); assert_eq!(Balances::reserved_balance(4), 5); @@ -977,10 +945,10 @@ fn extend_expiry() { >::on_initialize(10); assert_noop!( - Bounties::extend_bounty_expiry(RuntimeOrigin::signed(0), 0, Vec::new()), + Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator ); - assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new())); + assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); assert_eq!( Bounties::bounties(0).unwrap(), @@ -994,7 +962,7 @@ fn extend_expiry() { } ); - assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new())); + assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); assert_eq!( Bounties::bounties(0).unwrap(), @@ -1011,11 +979,8 @@ fn extend_expiry() { System::set_block_number(25); >::on_initialize(25); - assert_noop!( 
- Bounties::unassign_curator(RuntimeOrigin::signed(0), 0), - Error::::Premature - ); - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(4), 0)); + assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); + assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); assert_eq!(Balances::free_balance(4), 10); // not slashed assert_eq!(Balances::reserved_balance(4), 0); @@ -1088,14 +1053,14 @@ fn unassign_curator_self() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 1, 10)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(1), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 5); @@ -1103,7 +1068,7 @@ fn unassign_curator_self() { System::set_block_number(8); >::on_initialize(8); - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0)); + assert_ok!(Bounties::unassign_curator(Origin::signed(1), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -1136,14 +1101,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&user, 100); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); + 
assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); + assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); + assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!(Balances::free_balance(&user), 100 - expected_deposit); @@ -1158,14 +1123,14 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&user, 100); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); System::set_block_number(4); >::on_initialize(4); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); + assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); + assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); let expected_deposit = CuratorDepositMin::get(); assert_eq!(Balances::free_balance(&user), 100 - expected_deposit); @@ -1182,14 +1147,14 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&user, starting_balance); Balances::make_free_balance_be(&0, starting_balance); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); - 
assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); System::set_block_number(6); >::on_initialize(6); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); + assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); + assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); let expected_deposit = CuratorDepositMax::get(); assert_eq!(Balances::free_balance(&user), starting_balance - expected_deposit); @@ -1209,8 +1174,8 @@ fn approve_bounty_works_second_instance() { assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201); - assert_ok!(Bounties1::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties1::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties1::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties1::approve_bounty(Origin::root(), 0)); >::on_initialize(2); >::on_initialize(2); diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 71f4bf01899fb..d3e054cfc6351 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/bounties/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/bounties/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -67,104 +64,89 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) - /// The range of component `d` is `[0, 300]`. - fn propose_bounty(d: u32, ) -> Weight { - // Minimum execution time: 33_514 nanoseconds. - Weight::from_ref_time(34_906_466 as u64) - // Standard Error: 226 - .saturating_add(Weight::from_ref_time(2_241 as u64).saturating_mul(d as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + fn propose_bounty(_d: u32, ) -> Weight { + (28_903_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - // Minimum execution time: 14_129 nanoseconds. 
- Weight::from_ref_time(14_424_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (10_997_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - // Minimum execution time: 13_675 nanoseconds. - Weight::from_ref_time(13_964_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (8_967_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - // Minimum execution time: 38_926 nanoseconds. - Weight::from_ref_time(40_183_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (28_665_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - // Minimum execution time: 33_774 nanoseconds. - Weight::from_ref_time(34_295_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (25_141_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - // Minimum execution time: 28_558 nanoseconds. 
- Weight::from_ref_time(29_293_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (21_295_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - // Minimum execution time: 77_042 nanoseconds. - Weight::from_ref_time(77_730_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (67_951_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - // Minimum execution time: 43_632 nanoseconds. - Weight::from_ref_time(44_196_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (33_654_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - // Minimum execution time: 59_519 nanoseconds. 
- Weight::from_ref_time(59_967_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (50_582_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - // Minimum execution time: 25_263 nanoseconds. - Weight::from_ref_time(25_788_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_322_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Bounties BountyApprovals (r:1 w:1) - // Storage: Bounties Bounties (r:2 w:2) - // Storage: System Account (r:4 w:4) - /// The range of component `b` is `[0, 100]`. + // Storage: Bounties Bounties (r:1 w:1) + // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - // Minimum execution time: 8_953 nanoseconds. 
- Weight::from_ref_time(23_242_227 as u64) - // Standard Error: 13_187 - .saturating_add(Weight::from_ref_time(26_727_999 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(b as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(b as u64))) + (0 as Weight) + // Standard Error: 17_000 + .saturating_add((29_233_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } } @@ -174,103 +156,88 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) - /// The range of component `d` is `[0, 300]`. - fn propose_bounty(d: u32, ) -> Weight { - // Minimum execution time: 33_514 nanoseconds. - Weight::from_ref_time(34_906_466 as u64) - // Standard Error: 226 - .saturating_add(Weight::from_ref_time(2_241 as u64).saturating_mul(d as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + fn propose_bounty(_d: u32, ) -> Weight { + (28_903_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - // Minimum execution time: 14_129 nanoseconds. 
- Weight::from_ref_time(14_424_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (10_997_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - // Minimum execution time: 13_675 nanoseconds. - Weight::from_ref_time(13_964_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (8_967_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - // Minimum execution time: 38_926 nanoseconds. - Weight::from_ref_time(40_183_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (28_665_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - // Minimum execution time: 33_774 nanoseconds. - Weight::from_ref_time(34_295_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (25_141_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - // Minimum execution time: 28_558 nanoseconds. 
- Weight::from_ref_time(29_293_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (21_295_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - // Minimum execution time: 77_042 nanoseconds. - Weight::from_ref_time(77_730_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (67_951_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - // Minimum execution time: 43_632 nanoseconds. - Weight::from_ref_time(44_196_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (33_654_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - // Minimum execution time: 59_519 nanoseconds. 
- Weight::from_ref_time(59_967_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (50_582_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - // Minimum execution time: 25_263 nanoseconds. - Weight::from_ref_time(25_788_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_322_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Bounties BountyApprovals (r:1 w:1) - // Storage: Bounties Bounties (r:2 w:2) - // Storage: System Account (r:4 w:4) - /// The range of component `b` is `[0, 100]`. + // Storage: Bounties Bounties (r:1 w:1) + // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - // Minimum execution time: 8_953 nanoseconds. 
- Weight::from_ref_time(23_242_227 as u64) - // Standard Error: 13_187 - .saturating_add(Weight::from_ref_time(26_727_999 as u64).saturating_mul(b as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(b as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(b as u64))) + (0 as Weight) + // Standard Error: 17_000 + .saturating_add((29_233_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } } diff --git a/frame/child-bounties/Cargo.toml b/frame/child-bounties/Cargo.toml index ee9a838744d25..575f3e38c8183 100644 --- a/frame/child-bounties/Cargo.toml +++ b/frame/child-bounties/Cargo.toml @@ -34,7 +34,6 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index 697ed40e0071f..ca5af50276b9d 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -160,7 +160,7 @@ fn setup_pot_account() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 2dfe0660ad68e..4f25fdcf8903a 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -80,7 +80,6 @@ pub use 
pallet::*; type BalanceOf = pallet_treasury::BalanceOf; type BountiesError = pallet_bounties::Error; type BountyIndex = pallet_bounties::BountyIndex; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A child bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -146,7 +145,7 @@ pub mod pallet { type ChildBountyValueMinimum: Get>; /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -316,7 +315,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] parent_bounty_id: BountyIndex, #[pallet::compact] child_bounty_id: BountyIndex, - curator: AccountIdLookupOf, + curator: ::Source, #[pallet::compact] fee: BalanceOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; @@ -575,7 +574,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] parent_bounty_id: BountyIndex, #[pallet::compact] child_bounty_id: BountyIndex, - beneficiary: AccountIdLookupOf, + beneficiary: ::Source, ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index 67983b15f4579..2584445071471 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -21,6 +21,7 @@ use super::*; use crate as pallet_child_bounties; +use std::cell::RefCell; use frame_support::{ assert_noop, assert_ok, @@ -59,7 +60,7 @@ frame_support::construct_runtime!( ); parameter_types! 
{ - pub const MaximumBlockWeight: Weight = Weight::from_ref_time(1024); + pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } @@ -71,16 +72,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -98,12 +99,15 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); @@ -115,7 +119,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -136,7 +140,7 @@ parameter_types! 
{ } impl pallet_bounties::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BountyDepositBase = ConstU64<80>; type BountyDepositPayoutDelay = ConstU64<3>; type BountyUpdatePeriod = ConstU64<10>; @@ -150,7 +154,7 @@ impl pallet_bounties::Config for Test { type ChildBountyManager = ChildBounties; } impl pallet_child_bounties::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type MaxActiveChildBountyCount = ConstU32<2>; type ChildBountyValueMinimum = ConstU64<1>; type WeightInfo = (); @@ -172,7 +176,7 @@ fn last_event() -> ChildBountiesEvent { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::ChildBounties(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::ChildBounties(inner) = e { Some(inner) } else { None }) .last() .unwrap() } @@ -211,19 +215,19 @@ fn add_child_bounty() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 8; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // This verifies that the accept curator logic took a deposit. let expected_deposit = CuratorDepositMultiplier::get() * fee; @@ -234,7 +238,7 @@ fn add_child_bounty() { // Acc-4 is the parent curator. // Call from invalid origin & check for error "RequireCurator". 
assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(0), 0, 10, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(Origin::signed(0), 0, 10, b"12345-p1".to_vec()), BountiesError::RequireCurator, ); @@ -246,22 +250,17 @@ fn add_child_bounty() { assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 50, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(Origin::signed(4), 0, 50, b"12345-p1".to_vec()), pallet_balances::Error::::KeepAlive, ); assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 100, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(Origin::signed(4), 0, 100, b"12345-p1".to_vec()), Error::::InsufficientBountyBalance, ); // Add child-bounty with valid value, which can be funded by parent bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); // Check for the event child-bounty added. 
assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); @@ -304,16 +303,16 @@ fn child_bounty_assign_curator() { Balances::make_free_balance_be(&4, 101); Balances::make_free_balance_be(&8, 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 4; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Bounty account status before adding child-bounty. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); @@ -327,12 +326,7 @@ fn child_bounty_assign_curator() { // Add child-bounty. // Acc-4 is the parent curator & make sure enough deposit. 
- assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); @@ -345,7 +339,7 @@ fn child_bounty_assign_curator() { assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); let fee = 6u64; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); assert_eq!( ChildBounties::child_bounties(0, 0).unwrap(), @@ -363,11 +357,11 @@ fn child_bounty_assign_curator() { assert_eq!(Balances::reserved_balance(4), expected_deposit); assert_noop!( - ChildBounties::accept_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::accept_curator(Origin::signed(3), 0, 0), BountiesError::RequireCurator, ); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; @@ -413,39 +407,34 @@ fn award_claim_child_bounty() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. 
- assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. let fee = 8; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); // Award child-bounty. // Test for non child-bounty curator. assert_noop!( - ChildBounties::award_child_bounty(RuntimeOrigin::signed(3), 0, 0, 7), + ChildBounties::award_child_bounty(Origin::signed(3), 0, 0, 7), BountiesError::RequireCurator, ); - assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty(Origin::signed(8), 0, 0, 7)); let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -466,13 +455,13 @@ fn award_claim_child_bounty() { // Claim child-bounty. // Test for Premature condition. assert_noop!( - ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0), + ChildBounties::claim_child_bounty(Origin::signed(7), 0, 0), BountiesError::Premature ); System::set_block_number(9); - assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::claim_child_bounty(Origin::signed(7), 0, 0)); // Ensure child-bounty curator is paid with curator fee & deposit refund. assert_eq!(Balances::free_balance(8), 101 + fee); @@ -504,24 +493,19 @@ fn close_child_bounty_added() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); @@ -529,11 +513,11 @@ fn close_child_bounty_added() { // Close child-bounty. // Wrong origin. - assert_noop!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(7), 0, 0), BadOrigin); - assert_noop!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(8), 0, 0), BadOrigin); + assert_noop!(ChildBounties::close_child_bounty(Origin::signed(7), 0, 0), BadOrigin); + assert_noop!(ChildBounties::close_child_bounty(Origin::signed(8), 0, 0), BadOrigin); // Correct origin - parent curator. - assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(Origin::signed(4), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); @@ -561,33 +545,28 @@ fn close_child_bounty_active() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, 2)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); // Close child-bounty in active state. - assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(Origin::signed(4), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); @@ -619,38 +598,33 @@ fn close_child_bounty_pending() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let parent_fee = 6; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, parent_fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, parent_fee)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. let child_fee = 4; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, child_fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, child_fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); - assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty(Origin::signed(8), 0, 0, 7)); // Close child-bounty in pending_payout state. assert_noop!( - ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0), + ChildBounties::close_child_bounty(Origin::signed(4), 0, 0), BountiesError::PendingPayout ); @@ -680,30 +654,25 @@ fn child_bounty_added_unassign_curator() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Unassign curator in added state. assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0), + ChildBounties::unassign_curator(Origin::signed(4), 0, 0), BountiesError::UnexpectedStatus ); }); @@ -722,29 +691,24 @@ fn child_bounty_curator_proposed_unassign_curator() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. 
- assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, 2)); assert_eq!( ChildBounties::child_bounties(0, 0).unwrap(), @@ -758,10 +722,10 @@ fn child_bounty_curator_proposed_unassign_curator() { ); // Random account cannot unassign the curator when in proposed state. - assert_noop!(ChildBounties::unassign_curator(RuntimeOrigin::signed(99), 0, 0), BadOrigin); + assert_noop!(ChildBounties::unassign_curator(Origin::signed(99), 0, 0), BadOrigin); // Unassign curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::signed(4), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -799,23 +763,18 @@ fn child_bounty_active_unassign_curator() { Balances::make_free_balance_be(&7, 101); // Child-bounty curator 2. Balances::make_free_balance_be(&8, 101); // Child-bounty curator 3. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Create Child-bounty. 
- assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(3); @@ -823,8 +782,8 @@ fn child_bounty_active_unassign_curator() { // Propose and accept curator for child-bounty. let fee = 6; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -842,7 +801,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(4); // Unassign curator - from reject origin. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::root(), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::root(), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -862,8 +821,8 @@ fn child_bounty_active_unassign_curator() { // Propose and accept curator for child-bounty again. let fee = 2; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 7, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 7, fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(7), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( @@ -881,7 +840,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(5); // Unassign curator again - from parent curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::signed(4), 0, 0)); // Verify updated child-bounty status. 
assert_eq!( @@ -900,8 +859,8 @@ fn child_bounty_active_unassign_curator() { assert_eq!(Balances::reserved_balance(7), 0); // slashed // Propose and accept curator for child-bounty again. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 6, 2)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 6, 2)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(6), 0, 0)); assert_eq!( ChildBounties::child_bounties(0, 0).unwrap(), @@ -918,7 +877,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(6); // Unassign curator again - from child-bounty curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(6), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::signed(6), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -938,8 +897,8 @@ fn child_bounty_active_unassign_curator() { // Propose and accept curator for child-bounty one last time. let fee = 2; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 6, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 6, fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(6), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( @@ -959,7 +918,7 @@ fn child_bounty_active_unassign_curator() { // Unassign curator again - from non curator; non reject origin; some random guy. // Bounty update period is not yet complete. assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::unassign_curator(Origin::signed(3), 0, 0), BountiesError::Premature ); @@ -967,7 +926,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(20); // Unassign child curator from random account after inactivity. 
- assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::signed(3), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -1005,22 +964,17 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { Balances::make_free_balance_be(&7, 101); // Child-bounty curator 2. Balances::make_free_balance_be(&8, 101); // Child-bounty curator 3. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Create Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(3); @@ -1028,8 +982,8 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Propose and accept curator for child-bounty. 
let fee = 8; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -1047,7 +1001,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { >::on_initialize(4); // Unassign parent bounty curator. - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); System::set_block_number(5); >::on_initialize(5); @@ -1055,12 +1009,12 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Try unassign child-bounty curator - from non curator; non reject // origin; some random guy. Bounty update period is not yet complete. assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::unassign_curator(Origin::signed(3), 0, 0), Error::::ParentBountyNotActive ); // Unassign curator - from reject origin. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::root(), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::root(), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -1082,16 +1036,16 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { >::on_initialize(6); // Propose and accept curator for parent-bounty again. - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 5, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(5), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 5, 6)); + assert_ok!(Bounties::accept_curator(Origin::signed(5), 0)); System::set_block_number(7); >::on_initialize(7); // Propose and accept curator for child-bounty again. 
let fee = 2; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(5), 0, 0, 7, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(5), 0, 0, 7, fee)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(7), 0, 0)); let expected_deposit = CuratorDepositMin::get(); assert_eq!( @@ -1109,18 +1063,18 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { >::on_initialize(8); assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::unassign_curator(Origin::signed(3), 0, 0), BountiesError::Premature ); // Unassign parent bounty curator again. - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(5), 0)); + assert_ok!(Bounties::unassign_curator(Origin::signed(5), 0)); System::set_block_number(9); >::on_initialize(9); // Unassign curator again - from parent curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(Origin::signed(7), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -1153,29 +1107,24 @@ fn close_parent_with_child_bounty() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); // Try add child-bounty. // Should fail, parent bounty not active yet. 
assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 10, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec()), Error::::ParentBountyNotActive ); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(4); @@ -1184,21 +1133,21 @@ fn close_parent_with_child_bounty() { // Try close parent-bounty. // Child bounty active, can't close parent. assert_noop!( - Bounties::close_bounty(RuntimeOrigin::root(), 0), + Bounties::close_bounty(Origin::root(), 0), BountiesError::HasActiveChildBounty ); System::set_block_number(2); // Close child-bounty. - assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::root(), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(Origin::root(), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); // Try close parent-bounty again. // Should pass this time. - assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); }); } @@ -1217,22 +1166,17 @@ fn children_curator_fee_calculation_test() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), - 0, - 10, - b"12345-p1".to_vec() - )); + assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(4); @@ -1241,13 +1185,13 @@ fn children_curator_fee_calculation_test() { let fee = 6; // Propose curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); // Check curator fee added to the sum. assert_eq!(ChildBounties::children_curator_fees(0), fee); // Accept curator for child-bounty. - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); // Award child-bounty. - assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty(Origin::signed(8), 0, 0, 7)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; @@ -1269,18 +1213,18 @@ fn children_curator_fee_calculation_test() { System::set_block_number(9); // Claim child-bounty. 
- assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::claim_child_bounty(Origin::signed(7), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); // Award the parent bounty. - assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 9)); + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 9)); System::set_block_number(15); // Claim the parent bounty. - assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(9), 0)); + assert_ok!(Bounties::claim_bounty(Origin::signed(9), 0)); // Ensure parent-bounty curator received correctly reduced fee. assert_eq!(Balances::free_balance(4), 101 + 6 - fee); // 101 + 6 - 2 @@ -1307,22 +1251,22 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&Treasury::account_id(), parent_value * 3); Balances::make_free_balance_be(&parent_curator, parent_fee * 100); assert_ok!(Bounties::propose_bounty( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_value, b"12345".to_vec() )); - assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), parent_index)); + assert_ok!(Bounties::approve_bounty(Origin::root(), parent_index)); System::set_block_number(2); >::on_initialize(2); assert_ok!(Bounties::propose_curator( - RuntimeOrigin::root(), + Origin::root(), parent_index, parent_curator, parent_fee )); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(parent_curator), parent_index)); + assert_ok!(Bounties::accept_curator(Origin::signed(parent_curator), parent_index)); // Now we can start creating some child bounties. // Case 1: Parent and child curator are not the same. 
@@ -1335,7 +1279,7 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&child_curator, starting_balance); assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1343,14 +1287,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(3); >::on_initialize(3); assert_ok!(ChildBounties::propose_curator( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - RuntimeOrigin::signed(child_curator), + Origin::signed(child_curator), parent_index, child_index )); @@ -1370,7 +1314,7 @@ fn accept_curator_handles_different_deposit_calculations() { let reserved_before = Balances::reserved_balance(&parent_curator); assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1378,14 +1322,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(4); >::on_initialize(4); assert_ok!(ChildBounties::propose_curator( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - RuntimeOrigin::signed(child_curator), + Origin::signed(child_curator), parent_index, child_index )); @@ -1403,7 +1347,7 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&child_curator, starting_balance); assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1411,14 +1355,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(5); >::on_initialize(5); 
assert_ok!(ChildBounties::propose_curator( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - RuntimeOrigin::signed(child_curator), + Origin::signed(child_curator), parent_index, child_index )); @@ -1439,7 +1383,7 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&child_curator, starting_balance); assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1447,14 +1391,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(5); >::on_initialize(5); assert_ok!(ChildBounties::propose_curator( - RuntimeOrigin::signed(parent_curator), + Origin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - RuntimeOrigin::signed(child_curator), + Origin::signed(child_curator), parent_index, child_index )); diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index 235c84320effa..ad08e00149a30 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_child_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/child-bounties/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/child-bounties/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,59 +63,52 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBountyCount (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) - /// The range of component `d` is `[0, 300]`. fn add_child_bounty(d: u32, ) -> Weight { - // Minimum execution time: 59_121 nanoseconds. - Weight::from_ref_time(60_212_235 as u64) - // Standard Error: 149 - .saturating_add(Weight::from_ref_time(412 as u64).saturating_mul(d as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (51_064_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - // Minimum execution time: 20_785 nanoseconds. 
- Weight::from_ref_time(21_000_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (15_286_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - // Minimum execution time: 37_874 nanoseconds. - Weight::from_ref_time(38_322_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (29_929_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - // Minimum execution time: 43_385 nanoseconds. - Weight::from_ref_time(43_774_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (32_449_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - // Minimum execution time: 31_390 nanoseconds. 
- Weight::from_ref_time(31_802_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (23_793_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - // Minimum execution time: 74_956 nanoseconds. - Weight::from_ref_time(75_850_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (67_529_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -127,10 +117,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - // Minimum execution time: 57_215 nanoseconds. - Weight::from_ref_time(58_285_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (48_436_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -139,10 +128,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - // Minimum execution time: 67_641 nanoseconds. 
- Weight::from_ref_time(69_184_000 as u64) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(7 as u64)) + (58_044_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(7 as Weight)) } } @@ -154,59 +142,52 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBountyCount (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) - /// The range of component `d` is `[0, 300]`. fn add_child_bounty(d: u32, ) -> Weight { - // Minimum execution time: 59_121 nanoseconds. - Weight::from_ref_time(60_212_235 as u64) - // Standard Error: 149 - .saturating_add(Weight::from_ref_time(412 as u64).saturating_mul(d as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (51_064_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - // Minimum execution time: 20_785 nanoseconds. - Weight::from_ref_time(21_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (15_286_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - // Minimum execution time: 37_874 nanoseconds. 
- Weight::from_ref_time(38_322_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (29_929_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - // Minimum execution time: 43_385 nanoseconds. - Weight::from_ref_time(43_774_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (32_449_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - // Minimum execution time: 31_390 nanoseconds. - Weight::from_ref_time(31_802_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (23_793_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - // Minimum execution time: 74_956 nanoseconds. 
- Weight::from_ref_time(75_850_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (67_529_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -215,10 +196,9 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - // Minimum execution time: 57_215 nanoseconds. - Weight::from_ref_time(58_285_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (48_436_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -227,9 +207,8 @@ impl WeightInfo for () { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - // Minimum execution time: 67_641 nanoseconds. 
- Weight::from_ref_time(69_184_000 as u64) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(7 as u64)) + (58_044_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(7 as Weight)) } } diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index aca2434127f03..0cb2a8b136044 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index 4e8bf094ef9d6..076afcd203030 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -30,75 +30,74 @@ const SEED: u32 = 0; const MAX_BYTES: u32 = 1_024; -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } benchmarks_instance_pallet! { set_members { - let m in 0 .. T::MaxMembers::get(); - let n in 0 .. T::MaxMembers::get(); - let p in 0 .. T::MaxProposals::get(); + let m in 1 .. T::MaxMembers::get(); + let n in 1 .. T::MaxMembers::get(); + let p in 1 .. T::MaxProposals::get(); // Set old members. // We compute the difference of old and new members, so it should influence timing. let mut old_members = vec![]; + let mut last_old_member = account::("old member", 0, SEED); for i in 0 .. 
m { - let old_member = account::("old member", i, SEED); - old_members.push(old_member); + last_old_member = account::("old member", i, SEED); + old_members.push(last_old_member.clone()); } let old_members_count = old_members.len() as u32; Collective::::set_members( SystemOrigin::Root.into(), old_members.clone(), - old_members.last().cloned(), + Some(last_old_member.clone()), T::MaxMembers::get(), )?; - // If there were any old members generate a bunch of proposals. - if m > 0 { - // Set a high threshold for proposals passing so that they stay around. - let threshold = m.max(2); - // Length of the proposals should be irrelevant to `set_members`. - let length = 100; - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { - remark: vec![i as u8; length] - }.into(); - Collective::::propose( - SystemOrigin::Signed(old_members.last().unwrap().clone()).into(), - threshold, - Box::new(proposal.clone()), - MAX_BYTES, + // Set a high threshold for proposals passing so that they stay around. + let threshold = m.max(2); + // Length of the proposals should be irrelevant to `set_members`. + let length = 100; + for i in 0 .. p { + // Proposals should be different so that different proposal hashes are generated + let proposal: T::Proposal = SystemCall::::remark { + remark: vec![i as u8; length] + }.into(); + Collective::::propose( + SystemOrigin::Signed(last_old_member.clone()).into(), + threshold, + Box::new(proposal.clone()), + MAX_BYTES, + )?; + let hash = T::Hashing::hash_of(&proposal); + // Vote on the proposal to increase state relevant for `set_members`. + // Not voting for `last_old_member` because they proposed and not voting for the first member + // to keep the proposal from passing. + for j in 2 .. 
m - 1 { + let voter = &old_members[j as usize]; + let approve = true; + Collective::::vote( + SystemOrigin::Signed(voter.clone()).into(), + hash, + i, + approve, )?; - let hash = T::Hashing::hash_of(&proposal); - // Vote on the proposal to increase state relevant for `set_members`. - // Not voting for last old member because they proposed and not voting for the first member - // to keep the proposal from passing. - for j in 2 .. m - 1 { - let voter = &old_members[j as usize]; - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - hash, - i, - approve, - )?; - } } } // Construct `new_members`. // It should influence timing since it will sort this vector. let mut new_members = vec![]; + let mut last_member = account::("member", 0, SEED); for i in 0 .. n { - let member = account::("member", i, SEED); - new_members.push(member); + last_member = account::("member", i, SEED); + new_members.push(last_member.clone()); } - }: _(SystemOrigin::Root, new_members.clone(), new_members.last().cloned(), T::MaxMembers::get()) + }: _(SystemOrigin::Root, new_members.clone(), Some(last_member), T::MaxMembers::get()) verify { new_members.sort(); assert_eq!(Collective::::members(), new_members); @@ -356,7 +355,7 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); @@ -437,7 +436,7 @@ benchmarks_instance_pallet! 
{ index, approve, )?; - }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); @@ -489,19 +488,9 @@ benchmarks_instance_pallet! { let index = p - 1; // Have almost everyone vote aye on last proposal, while keeping it from passing. // A few abstainers will be the nay votes needed to fail the vote. - let mut yes_votes: MemberCount = 0; for j in 2 .. m - 1 { let voter = &members[j as usize]; let approve = true; - yes_votes += 1; - // vote aye till a prime nay vote keeps the proposal disapproved. - if <>::DefaultVote as DefaultVote>::default_vote( - Some(false), - yes_votes, - 0, - m,) { - break; - } Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash, @@ -522,7 +511,7 @@ benchmarks_instance_pallet! { assert_eq!(Collective::::proposals().len(), p as usize); // Prime nay will close it as disapproved - }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); @@ -594,7 +583,7 @@ benchmarks_instance_pallet! 
{ assert_eq!(Collective::::proposals().len(), p as usize); // Prime aye will close it as approved - }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::max_value(), bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 06d5b1fab78e7..0323be1382392 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -49,15 +49,12 @@ use sp_std::{marker::PhantomData, prelude::*, result}; use frame_support::{ codec::{Decode, Encode, MaxEncodedLen}, - dispatch::{ - DispatchError, DispatchResultWithPostInfo, Dispatchable, GetDispatchInfo, Pays, - PostDispatchInfo, - }, + dispatch::{DispatchError, DispatchResultWithPostInfo, Dispatchable, PostDispatchInfo}, ensure, traits::{ Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, }, - weights::{OldWeight, Weight}, + weights::{GetDispatchInfo, Pays, Weight}, }; #[cfg(test)] @@ -180,20 +177,17 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - /// The runtime origin type. - type RuntimeOrigin: From>; + /// The outer origin type. + type Origin: From>; - /// The runtime call dispatch type. + /// The outer call dispatch type. type Proposal: Parameter - + Dispatchable< - RuntimeOrigin = >::RuntimeOrigin, - PostInfo = PostDispatchInfo, - > + From> + + Dispatchable>::Origin, PostInfo = PostDispatchInfo> + + From> + GetDispatchInfo; - /// The runtime event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + /// The outer event type. + type Event: From> + IsType<::Event>; /// The time-out for council motions. 
type MotionDuration: Get; @@ -620,20 +614,17 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(m, p2)) .max(T::WeightInfo::close_approved(b, m, p2)) .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1.into()) + .saturating_add(p1) }, DispatchClass::Operational ))] - #[allow(deprecated)] - #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `close`")] - pub fn close_old_weight( + pub fn close( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: OldWeight, + #[pallet::compact] proposal_weight_bound: Weight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { - let proposal_weight_bound: Weight = proposal_weight_bound.into(); let _ = ensure_signed(origin)?; Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) @@ -662,64 +653,6 @@ pub mod pallet { let proposal_count = Self::do_disapprove_proposal(proposal_hash); Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) } - - /// Close a vote that is either approved, disapproved or whose voting period has ended. - /// - /// May be called by any signed account in order to finish voting and close the proposal. - /// - /// If called before the end of the voting period it will only close the vote if it is - /// has enough votes to be approved or disapproved. - /// - /// If called after the end of the voting period abstentions are counted as rejections - /// unless there is a prime member set and the prime member cast an approval. - /// - /// If the close operation completes successfully with disapproval, the transaction fee will - /// be waived. Otherwise execution of the approved operation will be charged to the caller. - /// - /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed - /// proposal. - /// + `length_bound`: The upper bound for the length of the proposal in storage. 
Checked via - /// `storage::read` so it is `size_of::() == 4` larger than the pure length. - /// - /// # - /// ## Weight - /// - `O(B + M + P1 + P2)` where: - /// - `B` is `proposal` size in bytes (length-fee-bounded) - /// - `M` is members-count (code- and governance-bounded) - /// - `P1` is the complexity of `proposal` preimage. - /// - `P2` is proposal-count (code-bounded) - /// - DB: - /// - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`) - /// - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec - /// `O(P2)`) - /// - any mutations done while executing `proposal` (`P1`) - /// - up to 3 events - /// # - #[pallet::weight(( - { - let b = *length_bound; - let m = T::MaxMembers::get(); - let p1 = *proposal_weight_bound; - let p2 = T::MaxProposals::get(); - T::WeightInfo::close_early_approved(b, m, p2) - .max(T::WeightInfo::close_early_disapproved(m, p2)) - .max(T::WeightInfo::close_approved(b, m, p2)) - .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1) - }, - DispatchClass::Operational - ))] - pub fn close( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] index: ProposalIndex, - proposal_weight_bound: Weight, - #[pallet::compact] length_bound: u32, - ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; - - Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) - } } } @@ -948,7 +881,7 @@ impl, I: 'static> Pallet { ensure!(proposal_len <= length_bound, Error::::WrongProposalLength); let proposal = ProposalOf::::get(hash).ok_or(Error::::ProposalMissing)?; let proposal_weight = proposal.get_dispatch_info().weight; - ensure!(proposal_weight.all_lte(weight_bound), Error::::WrongProposalWeight); + ensure!(proposal_weight <= weight_bound, Error::::WrongProposalWeight); Ok((proposal, proposal_len as usize)) } diff --git a/frame/collective/src/migrations/v4.rs b/frame/collective/src/migrations/v4.rs index 483c3f9fa9e69..4e6cd05584138 100644 --- 
a/frame/collective/src/migrations/v4.rs +++ b/frame/collective/src/migrations/v4.rs @@ -45,7 +45,7 @@ pub fn migrate::on_chain_storage_version(); @@ -70,7 +70,7 @@ pub fn migrate; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test where @@ -62,9 +61,8 @@ mod mock_democracy { #[pallet::config] pub trait Config: frame_system::Config + Sized { - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; - type ExternalMajorityOrigin: EnsureOrigin; + type Event: From> + IsType<::Event>; + type ExternalMajorityOrigin: EnsureOrigin; } #[pallet::call] @@ -91,23 +89,23 @@ parameter_types! { pub const MotionDuration: u64 = 3; pub const MaxProposals: u32 = 100; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -120,9 +118,9 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = ConstU64<3>; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -130,9 
+128,9 @@ impl Config for Test { type WeightInfo = (); } impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = ConstU64<3>; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -140,13 +138,13 @@ impl Config for Test { type WeightInfo = (); } impl mock_democracy::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExternalMajorityOrigin = EnsureProportionAtLeast; } impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Origin = Origin; + type Proposal = Call; + type Event = Event; type MotionDuration = ConstU64<3>; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -173,13 +171,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn make_proposal(value: u64) -> RuntimeCall { - RuntimeCall::System(frame_system::Call::remark_with_event { - remark: value.to_be_bytes().to_vec(), - }) +fn make_proposal(value: u64) -> Call { + Call::System(frame_system::Call::remark_with_event { remark: value.to_be_bytes().to_vec() }) } -fn record(event: RuntimeEvent) -> EventRecord { +fn record(event: Event) -> EventRecord { EventRecord { phase: Phase::Initialization, event, topics: vec![] } } @@ -200,60 +196,52 @@ fn close_works() { let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); System::set_block_number(3); assert_noop!( - Collective::close(RuntimeOrigin::signed(4), hash, 0, 
proposal_weight, proposal_len), + Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len), Error::::TooEarly ); System::set_block_number(4); - assert_ok!(Collective::close( - RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 2, no: 1 })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })) + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ] ); }); @@ -262,7 +250,7 @@ fn close_works() { #[test] fn proposal_weight_limit_works_on_approve() { new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Collective(crate::Call::set_members { + let proposal = Call::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, old_count: MaxMembers::get(), @@ -273,38 +261,26 @@ fn proposal_weight_limit_works_on_approve() { // Set 1 as prime voter Prime::::set(Some(1)); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); // With 1's 
prime vote, this should pass System::set_block_number(4); assert_noop!( - Collective::close( - RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight - Weight::from_ref_time(100), - proposal_len - ), + Collective::close(Origin::signed(4), hash, 0, proposal_weight - 100, proposal_len), Error::::WrongProposalWeight ); - assert_ok!(Collective::close( - RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); }) } #[test] fn proposal_weight_limit_ignored_on_disapprove() { new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Collective(crate::Call::set_members { + let proposal = Call::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, old_count: MaxMembers::get(), @@ -314,7 +290,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -322,10 +298,10 @@ fn proposal_weight_limit_ignored_on_disapprove() { // No votes, this proposal wont pass System::set_block_number(4); assert_ok!(Collective::close( - RuntimeOrigin::signed(4), + Origin::signed(4), hash, 0, - proposal_weight - Weight::from_ref_time(100), + proposal_weight - 100, proposal_len )); }) @@ -339,61 +315,53 @@ fn close_with_prime_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( - RuntimeOrigin::root(), + Origin::root(), vec![1, 2, 3], Some(3), MaxMembers::get() )); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, 
true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); System::set_block_number(4); - assert_ok!(Collective::close( - RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 2, no: 1 })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })) + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ] ); }); @@ -407,60 +375,54 @@ fn close_with_voting_prime_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( - RuntimeOrigin::root(), + Origin::root(), vec![1, 2, 3], Some(1), MaxMembers::get() )); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); System::set_block_number(4); - assert_ok!(Collective::close( - 
RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 3, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })) @@ -477,25 +439,25 @@ fn close_with_no_prime_but_majority_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(CollectiveMajority::set_members( - RuntimeOrigin::root(), + Origin::root(), vec![1, 2, 3, 4, 5], Some(5), MaxMembers::get() )); assert_ok!(CollectiveMajority::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 5, Box::new(proposal.clone()), proposal_len )); - assert_ok!(CollectiveMajority::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(CollectiveMajority::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(CollectiveMajority::vote(RuntimeOrigin::signed(3), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(Origin::signed(1), hash, 
0, true)); + assert_ok!(CollectiveMajority::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(Origin::signed(3), hash, 0, true)); System::set_block_number(4); assert_ok!(CollectiveMajority::close( - RuntimeOrigin::signed(4), + Origin::signed(4), hash, 0, proposal_weight, @@ -505,42 +467,42 @@ fn close_with_no_prime_but_majority_works() { assert_eq!( System::events(), vec![ - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Proposed { + record(Event::CollectiveMajority(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 5 })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { + record(Event::CollectiveMajority(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { + record(Event::CollectiveMajority(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { + record(Event::CollectiveMajority(CollectiveEvent::Voted { account: 3, proposal_hash: hash, voted: true, yes: 3, no: 0 })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Closed { + record(Event::CollectiveMajority(CollectiveEvent::Closed { proposal_hash: hash, yes: 5, no: 0 })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Approved { + record(Event::CollectiveMajority(CollectiveEvent::Approved { proposal_hash: hash })), - record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Executed { + record(Event::CollectiveMajority(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })) @@ -557,13 +519,13 @@ fn removal_of_old_voters_votes_works() { let hash = BlakeTwo256::hash_of(&proposal); let end = 4; assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - 
assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) @@ -578,13 +540,13 @@ fn removal_of_old_voters_votes_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true)); + assert_ok!(Collective::vote(Origin::signed(3), hash, 1, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) @@ -605,23 +567,18 @@ fn removal_of_old_voters_votes_works_with_set_members() { let hash = BlakeTwo256::hash_of(&proposal); let end = 4; assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); - assert_ok!(Collective::set_members( - RuntimeOrigin::root(), - vec![2, 3, 4], - None, - MaxMembers::get() - )); + assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MaxMembers::get())); assert_eq!( Collective::voting(&hash), 
Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) @@ -631,23 +588,18 @@ fn removal_of_old_voters_votes_works_with_set_members() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true)); + assert_ok!(Collective::vote(Origin::signed(3), hash, 1, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); - assert_ok!(Collective::set_members( - RuntimeOrigin::root(), - vec![2, 4], - None, - MaxMembers::get() - )); + assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MaxMembers::get())); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) @@ -663,7 +615,7 @@ fn propose_works() { let hash = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -677,7 +629,7 @@ fn propose_works() { assert_eq!( System::events(), - vec![record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + vec![record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, @@ -694,7 +646,7 @@ fn limit_active_proposals() { let proposal = make_proposal(i as u64); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -703,12 +655,7 @@ fn limit_active_proposals() { let proposal = make_proposal(MaxProposals::get() as u64 + 1); let proposal_len: u32 = 
proposal.using_encoded(|p| p.len() as u32); assert_noop!( - Collective::propose( - RuntimeOrigin::signed(1), - 3, - Box::new(proposal.clone()), - proposal_len - ), + Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len), Error::::TooManyProposals ); }) @@ -717,18 +664,13 @@ fn limit_active_proposals() { #[test] fn correct_validate_and_get_proposal() { new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Collective(crate::Call::set_members { + let proposal = Call::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, old_count: MaxMembers::get(), }); let length = proposal.encode().len() as u32; - assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), - 3, - Box::new(proposal.clone()), - length - )); + assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), length)); let hash = BlakeTwo256::hash_of(&proposal); let weight = proposal.get_dispatch_info().weight; @@ -745,11 +687,7 @@ fn correct_validate_and_get_proposal() { Error::::WrongProposalLength ); assert_noop!( - Collective::validate_and_get_proposal( - &hash, - length, - weight - Weight::from_ref_time(10) - ), + Collective::validate_and_get_proposal(&hash, length, weight - 10), Error::::WrongProposalWeight ); let res = Collective::validate_and_get_proposal(&hash, length, weight); @@ -766,12 +704,7 @@ fn motions_ignoring_non_collective_proposals_works() { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_noop!( - Collective::propose( - RuntimeOrigin::signed(42), - 3, - Box::new(proposal.clone()), - proposal_len - ), + Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone()), proposal_len), Error::::NotMember ); }); @@ -784,13 +717,13 @@ fn motions_ignoring_non_collective_votes_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - 
RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); assert_noop!( - Collective::vote(RuntimeOrigin::signed(42), hash, 0, true), + Collective::vote(Origin::signed(42), hash, 0, true), Error::::NotMember, ); }); @@ -804,13 +737,13 @@ fn motions_ignoring_bad_index_collective_vote_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); assert_noop!( - Collective::vote(RuntimeOrigin::signed(2), hash, 1, true), + Collective::vote(Origin::signed(2), hash, 1, true), Error::::WrongIndex, ); }); @@ -824,7 +757,7 @@ fn motions_vote_after_works() { let hash: H256 = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len @@ -835,45 +768,45 @@ fn motions_vote_after_works() { Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); // Cast first aye vote. - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) ); // Try to cast a duplicate aye vote. assert_noop!( - Collective::vote(RuntimeOrigin::signed(1), hash, 0, true), + Collective::vote(Origin::signed(1), hash, 0, true), Error::::DuplicateVote, ); // Cast a nay vote. - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) ); // Try to cast a duplicate nay vote. 
assert_noop!( - Collective::vote(RuntimeOrigin::signed(1), hash, 0, false), + Collective::vote(Origin::signed(1), hash, 0, false), Error::::DuplicateVote, ); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: false, @@ -893,7 +826,7 @@ fn motions_all_first_vote_free_works() { let hash: H256 = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len, @@ -905,40 +838,40 @@ fn motions_all_first_vote_free_works() { // For the motion, acc 2's first vote, expecting Ok with Pays::No. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(2), hash, 0, true); + Collective::vote(Origin::signed(2), hash, 0, true); assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); // Duplicate vote, expecting error with Pays::Yes. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(2), hash, 0, true); + Collective::vote(Origin::signed(2), hash, 0, true); assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes); // Modifying vote, expecting ok with Pays::Yes. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(2), hash, 0, false); + Collective::vote(Origin::signed(2), hash, 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); // For the motion, acc 3's first vote, expecting Ok with Pays::No. 
let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(3), hash, 0, true); + Collective::vote(Origin::signed(3), hash, 0, true); assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); // acc 3 modify the vote, expecting Ok with Pays::Yes. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(RuntimeOrigin::signed(3), hash, 0, false); + Collective::vote(Origin::signed(3), hash, 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info let proposal_weight = proposal.get_dispatch_info().weight; let close_rval: DispatchResultWithPostInfo = - Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len); + Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len); assert_eq!(close_rval.unwrap().pays_fee, Pays::No); // trying to close the proposal, which is already closed. // Expecting error "ProposalMissing" with Pays::Yes let close_rval: DispatchResultWithPostInfo = - Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len); + Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len); assert_eq!(close_rval.unwrap_err().post_info.pays_fee, Pays::Yes); }); } @@ -951,22 +884,16 @@ fn motions_reproposing_disapproved_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); - assert_ok!(Collective::close( - RuntimeOrigin::signed(2), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); assert_eq!(*Collective::proposals(), vec![]); assert_ok!(Collective::propose( - 
RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len @@ -978,7 +905,7 @@ fn motions_reproposing_disapproved_works() { #[test] fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {}); + let proposal = Call::Democracy(mock_democracy::Call::external_propose_majority {}); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); @@ -988,50 +915,44 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { // // Failed to execute with only 2 yes votes. assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Collective::close( - RuntimeOrigin::signed(2), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - 
record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })), @@ -1042,61 +963,53 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { // Executed with 3 yes votes. assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, true)); - assert_ok!(Collective::close( - RuntimeOrigin::signed(2), - hash, - 1, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 1, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true)); + assert_ok!(Collective::vote(Origin::signed(3), hash, 1, true)); + assert_ok!(Collective::close(Origin::signed(2), hash, 1, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 1, proposal_hash: hash, threshold: 2 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - 
record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 3, proposal_hash: hash, voted: true, yes: 3, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 3, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(RuntimeEvent::Democracy( - mock_democracy::pallet::Event::::ExternalProposed - )), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Democracy(mock_democracy::pallet::Event::::ExternalProposed)), + record(Event::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Ok(()) })), @@ -1113,52 +1026,44 @@ fn motions_disapproval_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); - assert_ok!(Collective::close( - RuntimeOrigin::signed(2), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), 
- record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: false, yes: 1, no: 1 })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 1, no: 1 })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })), + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })), ] ); }); @@ -1172,51 +1077,45 @@ fn motions_approval_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Collective::close( - RuntimeOrigin::signed(2), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, 
yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(RuntimeEvent::Collective(CollectiveEvent::Executed { + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })), @@ -1233,14 +1132,14 @@ fn motion_with_no_votes_closes_with_disapproval() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); assert_eq!( System::events()[0], - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, @@ -1251,7 +1150,7 @@ fn motion_with_no_votes_closes_with_disapproval() { // Closing the motion too early is not possible because it has neither // an approving or disapproving simple majority due to the lack of votes. assert_noop!( - Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len), + Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len), Error::::TooEarly ); @@ -1259,18 +1158,12 @@ fn motion_with_no_votes_closes_with_disapproval() { let closing_block = System::block_number() + MotionDuration::get(); System::set_block_number(closing_block); // we can successfully close the motion. - assert_ok!(Collective::close( - RuntimeOrigin::signed(2), - hash, - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); // Events show that the close ended in a disapproval. 
assert_eq!( System::events()[1], - record(RuntimeEvent::Collective(CollectiveEvent::Closed { + record(Event::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 0, no: 3 @@ -1278,7 +1171,7 @@ fn motion_with_no_votes_closes_with_disapproval() { ); assert_eq!( System::events()[2], - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ); }) } @@ -1293,28 +1186,28 @@ fn close_disapprove_does_not_care_about_weight_or_len() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); // First we make the proposal succeed - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); // It will not close with bad weight/len information assert_noop!( - Collective::close(RuntimeOrigin::signed(2), hash, 0, Weight::zero(), 0), + Collective::close(Origin::signed(2), hash, 0, 0, 0), Error::::WrongProposalLength, ); assert_noop!( - Collective::close(RuntimeOrigin::signed(2), hash, 0, Weight::zero(), proposal_len), + Collective::close(Origin::signed(2), hash, 0, 0, proposal_len), Error::::WrongProposalWeight, ); // Now we make the proposal fail - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); // It can close even if the weight/len information is bad - assert_ok!(Collective::close(RuntimeOrigin::signed(2), hash, 0, Weight::zero(), 
0)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, 0, 0)); }) } @@ -1325,42 +1218,40 @@ fn disapprove_proposal_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); // Proposal would normally succeed - assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); // But Root can disapprove and remove it anyway - assert_ok!(Collective::disapprove_proposal(RuntimeOrigin::root(), hash)); + assert_ok!(Collective::disapprove_proposal(Origin::root(), hash)); assert_eq!( System::events(), vec![ - record(RuntimeEvent::Collective(CollectiveEvent::Proposed { + record(Event::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Voted { + record(Event::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { - proposal_hash: hash - })), + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })), ] ); }) diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index 052550de7bd7e..2f5c6f590a999 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/collective/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/collective/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,162 +61,132 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Council Members (r:1 w:1) // Storage: Council Proposals (r:1 w:0) - // Storage: Council Prime (r:0 w:1) // Storage: Council Voting (r:100 w:100) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Minimum execution time: 18_895 nanoseconds. 
- Weight::from_ref_time(19_254_000 as u64) - // Standard Error: 63_540 - .saturating_add(Weight::from_ref_time(5_061_801 as u64).saturating_mul(m as u64)) - // Standard Error: 63_540 - .saturating_add(Weight::from_ref_time(7_588_981 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(p as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) + // Storage: Council Prime (r:0 w:1) + fn set_members(m: u32, n: u32, p: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 12_000 + .saturating_add((10_280_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 12_000 + .saturating_add((126_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 12_000 + .saturating_add((13_310_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } // Storage: Council Members (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 24_469 nanoseconds. 
- Weight::from_ref_time(23_961_134 as u64) - // Standard Error: 43 - .saturating_add(Weight::from_ref_time(1_677 as u64).saturating_mul(b as u64)) - // Standard Error: 450 - .saturating_add(Weight::from_ref_time(18_645 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (16_819_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((33_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 26_476 nanoseconds. - Weight::from_ref_time(25_829_298 as u64) - // Standard Error: 49 - .saturating_add(Weight::from_ref_time(1_741 as u64).saturating_mul(b as u64)) - // Standard Error: 515 - .saturating_add(Weight::from_ref_time(29_436 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + (18_849_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((56_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 33_585 nanoseconds. 
- Weight::from_ref_time(33_092_289 as u64) - // Standard Error: 173 - .saturating_add(Weight::from_ref_time(4_266 as u64).saturating_mul(b as u64)) - // Standard Error: 1_812 - .saturating_add(Weight::from_ref_time(29_262 as u64).saturating_mul(m as u64)) - // Standard Error: 1_789 - .saturating_add(Weight::from_ref_time(181_285 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (22_204_000 as Weight) + // Standard Error: 0 + .saturating_add((8_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((49_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) - /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { - // Minimum execution time: 36_374 nanoseconds. - Weight::from_ref_time(38_950_243 as u64) - // Standard Error: 2_583 - .saturating_add(Weight::from_ref_time(65_345 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (30_941_000 as Weight) + // Standard Error: 2_000 + .saturating_add((77_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 36_066 nanoseconds. 
- Weight::from_ref_time(38_439_655 as u64) - // Standard Error: 1_281 - .saturating_add(Weight::from_ref_time(17_045 as u64).saturating_mul(m as u64)) - // Standard Error: 1_249 - .saturating_add(Weight::from_ref_time(164_998 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (32_485_000 as Weight) + // Standard Error: 1_000 + .saturating_add((39_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 47_753 nanoseconds. 
- Weight::from_ref_time(46_507_829 as u64) - // Standard Error: 149 - .saturating_add(Weight::from_ref_time(2_159 as u64).saturating_mul(b as u64)) - // Standard Error: 1_581 - .saturating_add(Weight::from_ref_time(37_842 as u64).saturating_mul(m as u64)) - // Standard Error: 1_541 - .saturating_add(Weight::from_ref_time(173_395 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (33_487_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((66_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((157_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Prime (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 39_416 nanoseconds. 
- Weight::from_ref_time(39_610_161 as u64) - // Standard Error: 1_231 - .saturating_add(Weight::from_ref_time(32_991 as u64).saturating_mul(m as u64)) - // Standard Error: 1_200 - .saturating_add(Weight::from_ref_time(170_773 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (33_494_000 as Weight) + // Standard Error: 1_000 + .saturating_add((58_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Prime (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 49_840 nanoseconds. 
- Weight::from_ref_time(48_542_914 as u64) - // Standard Error: 136 - .saturating_add(Weight::from_ref_time(2_650 as u64).saturating_mul(b as u64)) - // Standard Error: 1_442 - .saturating_add(Weight::from_ref_time(37_898 as u64).saturating_mul(m as u64)) - // Standard Error: 1_406 - .saturating_add(Weight::from_ref_time(182_176 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (36_566_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((63_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((158_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) - /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { - // Minimum execution time: 24_199 nanoseconds. - Weight::from_ref_time(26_869_176 as u64) - // Standard Error: 1_609 - .saturating_add(Weight::from_ref_time(163_341 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (20_159_000 as Weight) + // Standard Error: 1_000 + .saturating_add((173_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } } @@ -227,161 +194,131 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Council Members (r:1 w:1) // Storage: Council Proposals (r:1 w:0) - // Storage: Council Prime (r:0 w:1) // Storage: Council Voting (r:100 w:100) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. 
- /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Minimum execution time: 18_895 nanoseconds. - Weight::from_ref_time(19_254_000 as u64) - // Standard Error: 63_540 - .saturating_add(Weight::from_ref_time(5_061_801 as u64).saturating_mul(m as u64)) - // Standard Error: 63_540 - .saturating_add(Weight::from_ref_time(7_588_981 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(p as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) + // Storage: Council Prime (r:0 w:1) + fn set_members(m: u32, n: u32, p: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 12_000 + .saturating_add((10_280_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 12_000 + .saturating_add((126_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 12_000 + .saturating_add((13_310_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } // Storage: Council Members (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 24_469 nanoseconds. 
- Weight::from_ref_time(23_961_134 as u64) - // Standard Error: 43 - .saturating_add(Weight::from_ref_time(1_677 as u64).saturating_mul(b as u64)) - // Standard Error: 450 - .saturating_add(Weight::from_ref_time(18_645 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (16_819_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((33_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 26_476 nanoseconds. - Weight::from_ref_time(25_829_298 as u64) - // Standard Error: 49 - .saturating_add(Weight::from_ref_time(1_741 as u64).saturating_mul(b as u64)) - // Standard Error: 515 - .saturating_add(Weight::from_ref_time(29_436 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + (18_849_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((56_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 33_585 nanoseconds. 
- Weight::from_ref_time(33_092_289 as u64) - // Standard Error: 173 - .saturating_add(Weight::from_ref_time(4_266 as u64).saturating_mul(b as u64)) - // Standard Error: 1_812 - .saturating_add(Weight::from_ref_time(29_262 as u64).saturating_mul(m as u64)) - // Standard Error: 1_789 - .saturating_add(Weight::from_ref_time(181_285 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (22_204_000 as Weight) + // Standard Error: 0 + .saturating_add((8_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((49_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) - /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { - // Minimum execution time: 36_374 nanoseconds. - Weight::from_ref_time(38_950_243 as u64) - // Standard Error: 2_583 - .saturating_add(Weight::from_ref_time(65_345 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (30_941_000 as Weight) + // Standard Error: 2_000 + .saturating_add((77_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 36_066 nanoseconds. 
- Weight::from_ref_time(38_439_655 as u64) - // Standard Error: 1_281 - .saturating_add(Weight::from_ref_time(17_045 as u64).saturating_mul(m as u64)) - // Standard Error: 1_249 - .saturating_add(Weight::from_ref_time(164_998 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (32_485_000 as Weight) + // Standard Error: 1_000 + .saturating_add((39_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 47_753 nanoseconds. 
- Weight::from_ref_time(46_507_829 as u64) - // Standard Error: 149 - .saturating_add(Weight::from_ref_time(2_159 as u64).saturating_mul(b as u64)) - // Standard Error: 1_581 - .saturating_add(Weight::from_ref_time(37_842 as u64).saturating_mul(m as u64)) - // Standard Error: 1_541 - .saturating_add(Weight::from_ref_time(173_395 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (33_487_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((66_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((157_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Prime (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 39_416 nanoseconds. 
- Weight::from_ref_time(39_610_161 as u64) - // Standard Error: 1_231 - .saturating_add(Weight::from_ref_time(32_991 as u64).saturating_mul(m as u64)) - // Standard Error: 1_200 - .saturating_add(Weight::from_ref_time(170_773 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (33_494_000 as Weight) + // Standard Error: 1_000 + .saturating_add((58_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Prime (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 49_840 nanoseconds. 
- Weight::from_ref_time(48_542_914 as u64) - // Standard Error: 136 - .saturating_add(Weight::from_ref_time(2_650 as u64).saturating_mul(b as u64)) - // Standard Error: 1_442 - .saturating_add(Weight::from_ref_time(37_898 as u64).saturating_mul(m as u64)) - // Standard Error: 1_406 - .saturating_add(Weight::from_ref_time(182_176 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (36_566_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((63_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((158_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) - /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { - // Minimum execution time: 24_199 nanoseconds. 
- Weight::from_ref_time(26_869_176 as u64) - // Standard Error: 1_609 - .saturating_add(Weight::from_ref_time(163_341 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (20_159_000 as Weight) + // Standard Error: 1_000 + .saturating_add((173_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } } diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 7483ec8935890..ac85c469354fe 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -20,12 +20,12 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } -wasm-instrument = { version = "0.3", default-features = false } +wasm-instrument = { version = "0.1", default-features = false } serde = { version = "1", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } -wasmi-validation = { version = "0.5", default-features = false } +wasmi-validation = { version = "0.4", default-features = false } impl-trait-for-tuples = "0.2" # Only used in benchmarking to generate random contract code @@ -36,9 +36,8 @@ rand_pcg = { version = "0.3", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "primitives" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "common" } 
pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } @@ -46,9 +45,9 @@ sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../p sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -array-bytes = "4.1" assert_matches = "1" env_logger = "0.9" +hex-literal = "0.3" pretty_assertions = "1" wat = "1" @@ -70,7 +69,7 @@ std = [ "sp-io/std", "sp-std/std", "sp-sandbox/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "wasm-instrument/std", diff --git a/frame/contracts/README.md b/frame/contracts/README.md index 18d16889a3fe8..bd5e58d89d1ce 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -37,37 +37,12 @@ changes still persist. One gas is equivalent to one [weight](https://docs.substrate.io/v3/runtime/weights-and-fees) which is defined as one picosecond of execution time on the runtime's reference machine. -### Revert Behaviour +### Notable Scenarios -Contract call failures are not cascading. When failures occur in a sub-call, they do not "bubble up", +Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up", and the call will only revert at the specific contract level. For example, if contract A calls contract B, and B fails, A can decide how to handle that failure, either proceeding or reverting A's changes. -### Offchain Execution - -In general, a contract execution needs to be deterministic so that all nodes come to the same -conclusion when executing it. 
To that end we disallow any instructions that could cause -indeterminism. Most notable are any floating point arithmetic. That said, sometimes contracts -are executed off-chain and hence are not subject to consensus. If code is only executed by a -single node and implicitly trusted by other actors is such a case. Trusted execution environments -come to mind. To that end we allow the execution of indeterminstic code for offchain usages -with the following constraints: - -1. No contract can ever be instantiated from an indeterministic code. The only way to execute -the code is to use a delegate call from a deterministic contract. -2. The code that wants to use this feature needs to depend on `pallet-contracts` and use `bare_call` -directly. This makes sure that by default `pallet-contracts` does not expose any indeterminism. - -## How to use - -When setting up the `Schedule` for your runtime make sure to set `InstructionWeights::fallback` -to a non zero value. The default is `0` and prevents the upload of any non deterministic code. - -An indeterministic code can be deployed on-chain by passing `Determinism::AllowIndeterministic` -to `upload_code`. A determinstic contract can then delegate call into it if and only if it -is ran by using `bare_call` and passing `Determinism::AllowIndeterministic` to it. 
**Never use -this argument when the contract is called from an on-chain transaction.** - ## Interface ### Dispatchable functions diff --git a/frame/contracts/primitives/Cargo.toml b/frame/contracts/common/Cargo.toml similarity index 70% rename from frame/contracts/primitives/Cargo.toml rename to frame/contracts/common/Cargo.toml index c8b7c4a2f7c37..49d7973ab155f 100644 --- a/frame/contracts/primitives/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -15,16 +15,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +serde = { version = "1", features = ["derive"], optional = true } # Substrate Dependencies (This crate should not rely on frame) +sp-core = { version = "6.0.0", path = "../../../primitives/core", default-features = false } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } +sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc", optional = true } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../../primitives/weights" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", + "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-rpc", + "serde", ] diff --git a/frame/contracts/primitives/README.md b/frame/contracts/common/README.md similarity index 100% rename from frame/contracts/primitives/README.md rename to frame/contracts/common/README.md diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/common/src/lib.rs similarity index 72% rename from frame/contracts/primitives/src/lib.rs rename to frame/contracts/common/src/lib.rs index 4faea9eb3ee75..f810725afcd36 100644 --- a/frame/contracts/primitives/src/lib.rs +++ 
b/frame/contracts/common/src/lib.rs @@ -21,23 +21,38 @@ use bitflags::bitflags; use codec::{Decode, Encode}; +use sp_core::Bytes; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; use sp_std::prelude::*; -use sp_weights::Weight; + +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "std")] +use sp_rpc::number::NumberOrHex; /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr( + feature = "std", + serde( + rename_all = "camelCase", + bound(serialize = "R: Serialize, Balance: Copy + Into"), + bound(deserialize = "R: Deserialize<'de>, Balance: TryFrom") + ) +)] pub struct ContractResult { - /// How much weight was consumed during execution. - pub gas_consumed: Weight, - /// How much weight is required as gas limit in order to execute this call. + /// How much gas was consumed during execution. + pub gas_consumed: u64, + /// How much gas is required as gas limit in order to execute this call. /// - /// This value should be used to determine the weight limit for on-chain execution. + /// This value should be used to determine the gas limit for on-chain execution. /// /// # Note /// @@ -45,7 +60,7 @@ pub struct ContractResult { /// is used. Currently, only `seal_call_runtime` makes use of pre charging. /// Additionally, any `seal_call` or `seal_instantiate` makes use of pre-charging /// when a non-zero `gas_limit` argument is supplied. - pub gas_required: Weight, + pub gas_required: u64, /// How much balance was deposited and reserved during execution in order to pay for storage. /// /// The storage deposit is never actually charged from the caller in case of [`Self::result`] @@ -65,6 +80,7 @@ pub struct ContractResult { /// /// The debug message is never generated during on-chain execution. 
It is reserved for /// RPC calls. + #[cfg_attr(feature = "std", serde(with = "as_string"))] pub debug_message: Vec, /// The execution result of the wasm code. pub result: R, @@ -97,6 +113,8 @@ pub enum ContractAccessError { bitflags! { /// Flags used by a contract to customize exit behaviour. #[derive(Encode, Decode)] + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[cfg_attr(feature = "std", serde(rename_all = "camelCase", transparent))] pub struct ReturnFlags: u32 { /// If this bit is set all changes made by the contract execution are rolled back. const REVERT = 0x0000_0001; @@ -105,11 +123,13 @@ bitflags! { /// Output of a contract call or instantiation which ran to completion. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct ExecReturnValue { /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. pub flags: ReturnFlags, /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. - pub data: Vec, + pub data: Bytes, } impl ExecReturnValue { @@ -121,6 +141,8 @@ impl ExecReturnValue { /// The result of a successful contract instantiation. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct InstantiateReturnValue { /// The output of the called constructor. pub result: ExecReturnValue, @@ -130,40 +152,63 @@ pub struct InstantiateReturnValue { /// The result of succesfully uploading a contract. 
#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr( + feature = "std", + serde( + rename_all = "camelCase", + bound(serialize = "CodeHash: Serialize, Balance: Copy + Into"), + bound(deserialize = "CodeHash: Deserialize<'de>, Balance: TryFrom") + ) +)] pub struct CodeUploadReturnValue { /// The key under which the new code is stored. pub code_hash: CodeHash, /// The deposit that was reserved at the caller. Is zero when the code already existed. + #[cfg_attr(feature = "std", serde(with = "as_hex"))] pub deposit: Balance, } /// Reference to an existing code hash or a new wasm module. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub enum Code { /// A wasm module as raw bytes. - Upload(Vec), + Upload(Bytes), /// The code hash of an on-chain wasm blob. Existing(Hash), } impl>, Hash> From for Code { fn from(from: T) -> Self { - Code::Upload(from.into()) + Code::Upload(Bytes(from.into())) } } /// The amount of balance that was either charged or refunded in order to pay for storage. #[derive(Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, Clone)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr( + feature = "std", + serde( + rename_all = "camelCase", + bound(serialize = "Balance: Copy + Into"), + bound(deserialize = "Balance: TryFrom") + ) +)] pub enum StorageDeposit { /// The transaction reduced storage consumption. /// /// This means that the specified amount of balance was transferred from the involved /// contracts to the call origin. + #[cfg_attr(feature = "std", serde(with = "as_hex"))] Refund(Balance), /// The transaction increased overall storage usage. /// /// This means that the specified amount of balance was transferred from the call origin /// to the contracts involved. 
+ #[cfg_attr(feature = "std", serde(with = "as_hex"))] Charge(Balance), } @@ -250,3 +295,42 @@ where } } } + +#[cfg(feature = "std")] +mod as_string { + use super::*; + use serde::{ser::Error, Deserializer, Serializer}; + + pub fn serialize(bytes: &Vec, serializer: S) -> Result { + std::str::from_utf8(bytes) + .map_err(|e| S::Error::custom(format!("Debug buffer contains invalid UTF8: {}", e)))? + .serialize(serializer) + } + + pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result, D::Error> { + Ok(String::deserialize(deserializer)?.into_bytes()) + } +} + +#[cfg(feature = "std")] +mod as_hex { + use super::*; + use serde::{de::Error as _, Deserializer, Serializer}; + + pub fn serialize(balance: &Balance, serializer: S) -> Result + where + S: Serializer, + Balance: Copy + Into, + { + Into::::into(*balance).serialize(serializer) + } + + pub fn deserialize<'de, D, Balance>(deserializer: D) -> Result + where + D: Deserializer<'de>, + Balance: TryFrom, + { + Balance::try_from(NumberOrHex::deserialize(deserializer)?) + .map_err(|_| D::Error::custom("Cannot decode NumberOrHex to Balance")) + } +} diff --git a/frame/contracts/fixtures/call_runtime.wat b/frame/contracts/fixtures/call_runtime.wat index 62fa08680a097..d5467f6e95e3e 100644 --- a/frame/contracts/fixtures/call_runtime.wat +++ b/frame/contracts/fixtures/call_runtime.wat @@ -1,6 +1,6 @@ ;; This passes its input to `seal_call_runtime` and returns the return value to its caller. 
(module - (import "__unstable__" "call_runtime" (func $call_runtime (param i32 i32) (result i32))) + (import "__unstable__" "seal_call_runtime" (func $seal_call_runtime (param i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -17,7 +17,7 @@ ) ;; Just use the call passed as input and store result to memory (i32.store (i32.const 0) - (call $call_runtime + (call $seal_call_runtime (i32.const 4) ;; Pointer where the call is stored (i32.load (i32.const 0)) ;; Size of the call ) diff --git a/frame/contracts/fixtures/create_storage_and_call.wat b/frame/contracts/fixtures/create_storage_and_call.wat deleted file mode 100644 index 2a1e53f7ce08a..0000000000000 --- a/frame/contracts/fixtures/create_storage_and_call.wat +++ /dev/null @@ -1,55 +0,0 @@ -;; This calls another contract as passed as its account id. It also creates some storage. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) - (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "deploy")) - - (func (export "call") - ;; store length of input buffer - (i32.store (i32.const 0) (i32.const 512)) - - ;; copy input at address 4 - (call $seal_input (i32.const 4) (i32.const 0)) - - ;; create 4 byte of storage before calling - (call $seal_set_storage - (i32.const 0) ;; Pointer to storage key - (i32.const 0) ;; Pointer to value - (i32.const 4) ;; Size of value - ) - - ;; call passed contract - (call $assert (i32.eqz - (call $seal_call - (i32.const 0) ;; No flags - (i32.const 8) ;; Pointer to "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 
0 = all. - (i32.const 512) ;; Pointer to the buffer with value to transfer - (i32.const 4) ;; Pointer to input data buffer address - (i32.const 4) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - ) - )) - - ;; create 8 byte of storage after calling - ;; item of 12 bytes because we override 4 bytes - (call $seal_set_storage - (i32.const 0) ;; Pointer to storage key - (i32.const 0) ;; Pointer to value - (i32.const 12) ;; Size of value - ) - ) -) diff --git a/frame/contracts/fixtures/delegate_call_simple.wat b/frame/contracts/fixtures/delegate_call_simple.wat deleted file mode 100644 index 24ae5a13e33e5..0000000000000 --- a/frame/contracts/fixtures/delegate_call_simple.wat +++ /dev/null @@ -1,50 +0,0 @@ -;; Just delegate call into the passed code hash and assert success. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_delegate_call" (func $seal_delegate_call (param i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 3 3)) - - ;; [0, 32) buffer where input is copied - - ;; [32, 36) size of the input buffer - (data (i32.const 32) "\20") - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "call") - ;; Reading "callee" code_hash - (call $seal_input (i32.const 0) (i32.const 32)) - - ;; assert input size == 32 - (call $assert - (i32.eq - (i32.load (i32.const 32)) - (i32.const 32) - ) - ) - - ;; Delegate call into passed code hash - (call $assert - (i32.eq - (call $seal_delegate_call - (i32.const 0) ;; Set no call flags - (i32.const 0) ;; Pointer to "callee" code_hash. 
- (i32.const 0) ;; Input is ignored - (i32.const 0) ;; Length of the input - (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - ) - (i32.const 0) - ) - ) - ) - - (func (export "deploy")) -) diff --git a/frame/contracts/fixtures/float_instruction.wat b/frame/contracts/fixtures/float_instruction.wat deleted file mode 100644 index c19b5c12cdcec..0000000000000 --- a/frame/contracts/fixtures/float_instruction.wat +++ /dev/null @@ -1,11 +0,0 @@ -;; Module that contains a float instruction which is illegal in deterministic mode -(module - (func (export "call") - f32.const 1 - drop - ) - (func (export "deploy") - f32.const 2 - drop - ) -) diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 648bf0fd1f812..dca29c805cec4 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -15,23 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Procedural macroses used in the contracts module. -//! -//! Most likely you should use the [`#[define_env]`][`macro@define_env`] attribute macro which hides -//! boilerplate of defining external environment for a wasm module. +//! Proc macros used in the contracts module. #![no_std] extern crate alloc; -use alloc::{ - format, - string::{String, ToString}, - vec::Vec, -}; +use alloc::string::ToString; use proc_macro2::TokenStream; -use quote::{quote, quote_spanned, ToTokens}; -use syn::{parse_macro_input, spanned::Spanned, Data, DeriveInput, Ident}; +use quote::{quote, quote_spanned}; +use syn::{parse_macro_input, Data, DeriveInput, Ident}; /// This derives `Debug` for a struct where each field must be of some numeric type. /// It interprets each field as its represents some weight and formats it as times so that @@ -92,7 +85,7 @@ fn derive_debug( /// This is only used then the `full` feature is activated. 
#[cfg(feature = "full")] fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream) -> TokenStream { - use syn::Fields; + use syn::{spanned::Spanned, Fields}; match &data.fields { Fields::Named(fields) => { @@ -147,438 +140,3 @@ fn format_default(field: &Ident) -> TokenStream { &self.#field } } - -/// Parsed environment definition. -struct EnvDef { - host_funcs: Vec, -} - -/// Parsed host function definition. -struct HostFn { - item: syn::ItemFn, - module: String, - name: String, - returns: HostFnReturn, -} - -enum HostFnReturn { - Unit, - U32, - ReturnCode, -} - -impl ToTokens for HostFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.item.to_tokens(tokens); - } -} - -impl HostFn { - pub fn try_from(item: syn::ItemFn) -> syn::Result { - let err = |span, msg| { - let msg = format!("Invalid host function definition. {}", msg); - syn::Error::new(span, msg) - }; - let msg = "only #[version()] or #[unstable] attribute is allowed."; - let span = item.span(); - let mut attrs = item.attrs.clone(); - attrs.retain(|a| !(a.path.is_ident("doc") || a.path.is_ident("prefixed_alias"))); - let name = item.sig.ident.to_string(); - let module = match attrs.len() { - 0 => Ok("seal0".to_string()), - 1 => { - let attr = &attrs[0]; - let ident = attr.path.get_ident().ok_or(err(span, msg))?.to_string(); - match ident.as_str() { - "version" => { - let ver: syn::LitInt = attr.parse_args()?; - Ok(format!("seal{}", ver.base10_parse::().map_err(|_| err(span, msg))?)) - }, - "unstable" => Ok("__unstable__".to_string()), - _ => Err(err(span, msg)), - } - }, - _ => Err(err(span, msg)), - }?; - - let msg = r#"Should return one of the following: - - Result<(), TrapReason>, - - Result, - - Result"#; - - let ret_ty = match item.clone().sig.output { - syn::ReturnType::Type(_, ty) => Ok(ty.clone()), - _ => Err(err(span, &msg)), - }?; - - match *ret_ty { - syn::Type::Path(tp) => { - let result = &tp.path.segments.last().ok_or(err(span, &msg))?; - let (id, span) = 
(result.ident.to_string(), result.ident.span()); - id.eq(&"Result".to_string()).then_some(()).ok_or(err(span, &msg))?; - - match &result.arguments { - syn::PathArguments::AngleBracketed(group) => { - if group.args.len() != 2 { - return Err(err(span, &msg)) - }; - - let arg2 = group.args.last().ok_or(err(span, &msg))?; - - let err_ty = match arg2 { - syn::GenericArgument::Type(ty) => Ok(ty.clone()), - _ => Err(err(arg2.span(), &msg)), - }?; - - match err_ty { - syn::Type::Path(tp) => Ok(tp - .path - .segments - .first() - .ok_or(err(arg2.span(), &msg))? - .ident - .to_string()), - _ => Err(err(tp.span(), &msg)), - }? - .eq("TrapReason") - .then_some(()) - .ok_or(err(span, &msg))?; - - let arg1 = group.args.first().ok_or(err(span, &msg))?; - let ok_ty = match arg1 { - syn::GenericArgument::Type(ty) => Ok(ty.clone()), - _ => Err(err(arg1.span(), &msg)), - }?; - let ok_ty_str = match ok_ty { - syn::Type::Path(tp) => Ok(tp - .path - .segments - .first() - .ok_or(err(arg1.span(), &msg))? - .ident - .to_string()), - syn::Type::Tuple(tt) => { - if !tt.elems.is_empty() { - return Err(err(arg1.span(), &msg)) - }; - Ok("()".to_string()) - }, - _ => Err(err(ok_ty.span(), &msg)), - }?; - - let returns = match ok_ty_str.as_str() { - "()" => Ok(HostFnReturn::Unit), - "u32" => Ok(HostFnReturn::U32), - "ReturnCode" => Ok(HostFnReturn::ReturnCode), - _ => Err(err(arg1.span(), &msg)), - }?; - Ok(Self { item, module, name, returns }) - }, - _ => Err(err(span, &msg)), - } - }, - _ => Err(err(span, &msg)), - } - } - - fn to_wasm_sig(&self) -> TokenStream { - let args = self.item.sig.inputs.iter().skip(1).filter_map(|a| match a { - syn::FnArg::Typed(pt) => Some(&pt.ty), - _ => None, - }); - let returns = match &self.returns { - HostFnReturn::U32 => quote! { vec![ ::VALUE_TYPE ] }, - HostFnReturn::ReturnCode => quote! { vec![ ::VALUE_TYPE ] }, - HostFnReturn::Unit => quote! { vec![] }, - }; - - quote! { - wasm_instrument::parity_wasm::elements::FunctionType::new( - vec! 
[ #(<#args>::VALUE_TYPE),* ], - #returns, - ) - } - } -} - -impl EnvDef { - pub fn try_from(item: syn::ItemMod) -> syn::Result { - let span = item.span(); - let err = |msg| syn::Error::new(span, msg); - let items = &item - .content - .as_ref() - .ok_or(err("Invalid environment definition, expected `mod` to be inlined."))? - .1; - - let extract_fn = |i: &syn::Item| match i { - syn::Item::Fn(i_fn) => Some(i_fn.clone()), - _ => None, - }; - - let selector = |a: &syn::Attribute| a.path.is_ident("prefixed_alias"); - - let aliases = items - .iter() - .filter_map(extract_fn) - .filter(|i| i.attrs.iter().any(selector)) - .map(|mut i| { - i.attrs.retain(|i| !selector(i)); - i.sig.ident = syn::Ident::new( - &format!("seal_{}", &i.sig.ident.to_string()), - i.sig.ident.span(), - ); - i - }) - .map(|i| HostFn::try_from(i)); - - let host_funcs = items - .iter() - .filter_map(extract_fn) - .map(|i| HostFn::try_from(i)) - .chain(aliases) - .collect::, _>>()?; - - Ok(Self { host_funcs }) - } -} - -/// Expands environment definiton. -/// Should generate source code for: -/// - wasm import satisfy checks (see `expand_can_satisfy()`); -/// - implementations of the host functions to be added to the wasm runtime environment (see -/// `expand_impls()`). -fn expand_env(def: &mut EnvDef) -> proc_macro2::TokenStream { - let can_satisfy = expand_can_satisfy(def); - let impls = expand_impls(def); - - quote! { - pub struct Env; - #can_satisfy - #impls - } -} - -/// Generates `can_satisfy()` method for every host function, to be used to check -/// these functions versus expected module, name and signatures when imporing them from a wasm -/// module. -fn expand_can_satisfy(def: &mut EnvDef) -> proc_macro2::TokenStream { - let checks = def.host_funcs.iter().map(|f| { - let (module, name, signature) = (&f.module, &f.name, &f.to_wasm_sig()); - quote! 
{ - if module == #module.as_bytes() - && name == #name.as_bytes() - && signature == &#signature - { - return true; - } - } - }); - let satisfy_checks = quote! { - #( #checks )* - }; - - quote! { - impl crate::wasm::env_def::ImportSatisfyCheck for Env { - fn can_satisfy( - module: &[u8], - name: &[u8], - signature: &wasm_instrument::parity_wasm::elements::FunctionType, - ) -> bool { - use crate::wasm::env_def::ConvertibleToWasm; - #[cfg(not(feature = "unstable-interface"))] - if module == b"__unstable__" { - return false; - } - #satisfy_checks - return false; - } - } - } -} - -/// Generates implementation for every host function, to register it in the contract execution -/// environment. -fn expand_impls(def: &mut EnvDef) -> proc_macro2::TokenStream { - let impls = def.host_funcs.iter().map(|f| { - let params = &f.item.sig.inputs.iter().skip(1).map(|arg| { - match arg { - syn::FnArg::Typed(pt) => { - if let syn::Pat::Ident(ident) = &*pt.pat { - let p_type = &pt.ty; - let p_name = ident.ident.clone(); - quote! { - let #p_name : <#p_type as crate::wasm::env_def::ConvertibleToWasm>::NativeType = - args.next() - .and_then(|v| <#p_type as crate::wasm::env_def::ConvertibleToWasm>::from_typed_value(v.clone())) - .expect( - "precondition: all imports should be checked against the signatures of corresponding - functions defined by `#[define_env]` proc macro by the user of the macro; - thus this can never be `None`; - qed;" - ); - } - } else { quote! { } } - }, - _ => quote! { }, - } - }); - - let outline = match &f.returns { - HostFnReturn::Unit => quote! { - body().map_err(|reason| { - ctx.set_trap_reason(reason); - sp_sandbox::HostError - })?; - return Ok(sp_sandbox::ReturnValue::Unit); - }, - _ => quote! 
{ - let r = body().map_err(|reason| { - ctx.set_trap_reason(reason); - sp_sandbox::HostError - })?; - return Ok(sp_sandbox::ReturnValue::Value({ - r.to_typed_value() - })); - }, - }; - let params = params.clone(); - let (module, name, ident, body) = (&f.module, &f.name, &f.item.sig.ident, &f.item.block); - let unstable_feat = match module.as_str() { - "__unstable__" => quote! { #[cfg(feature = "unstable-interface")] }, - _ => quote! { }, - }; - quote! { - #unstable_feat - f(#module.as_bytes(), #name.as_bytes(), { - fn #ident( - ctx: &mut crate::wasm::Runtime, - args: &[sp_sandbox::Value], - ) -> Result - where - ::AccountId: sp_core::crypto::UncheckedFrom<::Hash> - + AsRef<[u8]>, - { - #[allow(unused)] - let mut args = args.iter(); - let mut body = || { - #( #params )* - #body - }; - #outline - } - #ident:: - }); - } - }); - - let packed_impls = quote! { - #( #impls )* - }; - - quote! { - impl crate::wasm::env_def::FunctionImplProvider for Env - where - ::AccountId: - sp_core::crypto::UncheckedFrom<::Hash> + AsRef<[u8]>, - { - fn impls)>(f: &mut F) { - #packed_impls - } - } - } -} - -/// Defines a host functions set that can be imported by contract wasm code. -/// -/// **NB**: Be advised that all functions defined by this macro -/// will panic if called with unexpected arguments. -/// -/// It's up to you as the user of this macro to check signatures of wasm code to be executed -/// and reject the code if any imported function has a mismatched signature. -/// -/// ## Example -/// -/// ```nocompile -/// #[define_env] -/// pub mod some_env { -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<(), TrapReason> { -/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) -/// } -/// } -/// ``` -/// This example will expand to the `some_host_fn()` defined in the wasm module named `seal0`. 
-/// To define a host function in `seal1` and `__unstable__` modules, it should be annotated with the -/// appropriate attribute as follows: -/// -/// ## Example -/// -/// ```nocompile -/// #[define_env] -/// pub mod some_env { -/// #[version(1)] -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { -/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) -/// } -/// -/// #[unstable] -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { -/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) -/// } -/// } -/// ``` -/// -/// In legacy versions of pallet_contracts, it was a naming convention that all host functions had -/// to be named with the `seal_` prefix. For the sake of backwards compatibility, each host function -/// now can get a such prefix-named alias function generated by marking it by the -/// `#[prefixed_alias]` attribute: -/// -/// ## Example -/// -/// ```nocompile -/// #[define_env] -/// pub mod some_env { -/// #[version(1)] -/// #[prefixed_alias] -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { -/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) -/// } -/// -/// #[unstable] -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { -/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) -/// } -/// } -/// ``` -/// -/// In this example, the following host functions will be generated by the macro: -/// - `some_host_fn()` in module `seal1`, -/// - `seal_some_host_fn()` in module `seal1`, -/// - `some_host_fn()` in module `__unstable__`. -/// -/// Only following return types are allowed for the host functions defined with the macro: -/// - `Result<(), TrapReason>`, -/// - `Result`, -/// - `Result`. 
-/// -/// The macro expands to `pub struct Env` declaration, with the following traits implementations: -/// - `pallet_contracts::wasm::env_def::ImportSatisfyCheck` -/// - `pallet_contracts::wasm::env_def::FunctionImplProvider` -#[proc_macro_attribute] -pub fn define_env( - attr: proc_macro::TokenStream, - item: proc_macro::TokenStream, -) -> proc_macro::TokenStream { - if !attr.is_empty() { - let msg = "Invalid `define_env` attribute macro: expected no attributes: `#[define_env]`."; - let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into() - } - - let item = syn::parse_macro_input!(item as syn::ItemMod); - - match EnvDef::try_from(item) { - Ok(mut def) => expand_env(&mut def).into(), - Err(e) => e.to_compile_error().into(), - } -} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml new file mode 100644 index 0000000000000..7876c7cba40d0 --- /dev/null +++ b/frame/contracts/rpc/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "pallet-contracts-rpc" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Node-specific RPC methods for interaction with contracts." 
+readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0" } +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +serde = { version = "1", features = ["derive"] } + +# Substrate Dependencies +pallet-contracts-primitives = { version = "6.0.0", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { version = "6.0.0", path = "../../../primitives/core" } +sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } + +[dev-dependencies] +serde_json = "1" diff --git a/frame/contracts/rpc/README.md b/frame/contracts/rpc/README.md new file mode 100644 index 0000000000000..be6df237bf60d --- /dev/null +++ b/frame/contracts/rpc/README.md @@ -0,0 +1,3 @@ +Node-specific RPC methods for interaction with contracts. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml new file mode 100644 index 0000000000000..bd07d577ec272 --- /dev/null +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "pallet-contracts-rpc-runtime-api" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Runtime API definition required by Contracts RPC extensions." 
+readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +# Substrate Dependencies +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../common" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../../../primitives/std" } + +[features] +default = ["std"] +std = [ + "sp-api/std", + "codec/std", + "scale-info/std", + "sp-std/std", + "sp-runtime/std", + "pallet-contracts-primitives/std", +] diff --git a/frame/contracts/rpc/runtime-api/README.md b/frame/contracts/rpc/runtime-api/README.md new file mode 100644 index 0000000000000..d57f29a93bd1d --- /dev/null +++ b/frame/contracts/rpc/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition required by Contracts RPC extensions. + +This API should be imported and implemented by the runtime, +of a node that wants to use the custom RPC extension +adding Contracts access methods. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs new file mode 100644 index 0000000000000..9765b37057c7b --- /dev/null +++ b/frame/contracts/rpc/runtime-api/src/lib.rs @@ -0,0 +1,85 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime API definition required by Contracts RPC extensions. +//! +//! This API should be imported and implemented by the runtime, +//! of a node that wants to use the custom RPC extension +//! adding Contracts access methods. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Codec; +use pallet_contracts_primitives::{ + Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, GetStorageResult, +}; +use sp_std::vec::Vec; + +sp_api::decl_runtime_apis! { + /// The API to interact with contracts without using executive. + pub trait ContractsApi where + AccountId: Codec, + Balance: Codec, + BlockNumber: Codec, + Hash: Codec, + { + /// Perform a call from a specified account to a given contract. + /// + /// See `pallet_contracts::Pallet::call`. + fn call( + origin: AccountId, + dest: AccountId, + value: Balance, + gas_limit: u64, + storage_deposit_limit: Option, + input_data: Vec, + ) -> ContractExecResult; + + /// Instantiate a new contract. + /// + /// See `pallet_contracts::Pallet::instantiate`. + fn instantiate( + origin: AccountId, + value: Balance, + gas_limit: u64, + storage_deposit_limit: Option, + code: Code, + data: Vec, + salt: Vec, + ) -> ContractInstantiateResult; + + + /// Upload new code without instantiating a contract from it. + /// + /// See `pallet_contracts::Pallet::upload_code`. + fn upload_code( + origin: AccountId, + code: Vec, + storage_deposit_limit: Option, + ) -> CodeUploadResult; + + /// Query a given storage key in a given contract. 
+ /// + /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the + /// specified account and `Ok(None)` if it doesn't. If the account specified by the address + /// doesn't exist, or doesn't have a contract then `Err` is returned. + fn get_storage( + address: AccountId, + key: Vec, + ) -> GetStorageResult; + } +} diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs new file mode 100644 index 0000000000000..0df8f90237ed3 --- /dev/null +++ b/frame/contracts/rpc/src/lib.rs @@ -0,0 +1,524 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Node-specific RPC methods for interaction with contracts. 
+ +#![warn(unused_crate_dependencies)] + +use std::{marker::PhantomData, sync::Arc}; + +use codec::Codec; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::error::{CallError, ErrorCode, ErrorObject}, +}; +use pallet_contracts_primitives::{ + Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, +}; +use serde::{Deserialize, Serialize}; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::Bytes; +use sp_rpc::number::NumberOrHex; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; + +pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; + +const RUNTIME_ERROR: i32 = 1; +const CONTRACT_DOESNT_EXIST: i32 = 2; +const KEY_DECODING_FAILED: i32 = 3; + +pub type Weight = u64; + +/// A rough estimate of how much gas a decent hardware consumes per second, +/// using native execution. +/// This value is used to set the upper bound for maximal contract calls to +/// prevent blocking the RPC for too long. +/// +/// As 1 gas is equal to 1 weight we base this on the conducted benchmarks which +/// determined runtime weights: +/// +const GAS_PER_SECOND: Weight = 1_000_000_000_000; + +/// The maximum amount of weight that the call and instantiate rpcs are allowed to consume. +/// This puts a ceiling on the weight limit that is supplied to the rpc as an argument. +const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; + +/// A private newtype for converting `ContractAccessError` into an RPC error. 
+struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); + +impl From for JsonRpseeError { + fn from(e: ContractAccessError) -> Self { + use pallet_contracts_primitives::ContractAccessError::*; + match e.0 { + DoesntExist => CallError::Custom(ErrorObject::owned( + CONTRACT_DOESNT_EXIST, + "The specified contract doesn't exist.", + None::<()>, + )) + .into(), + KeyDecodingFailed => CallError::Custom(ErrorObject::owned( + KEY_DECODING_FAILED, + "Failed to decode the specified storage key.", + None::<()>, + )) + .into(), + } + } +} + +/// A struct that encodes RPC parameters required for a call to a smart-contract. +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct CallRequest { + origin: AccountId, + dest: AccountId, + value: NumberOrHex, + gas_limit: NumberOrHex, + storage_deposit_limit: Option, + input_data: Bytes, +} + +/// A struct that encodes RPC parameters required to instantiate a new smart-contract. +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct InstantiateRequest { + origin: AccountId, + value: NumberOrHex, + gas_limit: NumberOrHex, + storage_deposit_limit: Option, + code: Code, + data: Bytes, + salt: Bytes, +} + +/// A struct that encodes RPC parameters required for a call to upload a new code. +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct CodeUploadRequest { + origin: AccountId, + code: Bytes, + storage_deposit_limit: Option, +} + +/// Contracts RPC methods. +#[rpc(client, server)] +pub trait ContractsApi +where + Balance: Copy + TryFrom + Into, +{ + /// Executes a call to a contract. + /// + /// This call is performed locally without submitting any transactions. Thus executing this + /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. 
+ /// + /// This method is useful for calling getter-like methods on contracts or to dry-run a + /// a contract call in order to determine the `gas_limit`. + #[method(name = "contracts_call")] + fn call( + &self, + call_request: CallRequest, + at: Option, + ) -> RpcResult>; + + /// Instantiate a new contract. + /// + /// This instantiate is performed locally without submitting any transactions. Thus the contract + /// is not actually created. + /// + /// This method is useful for UIs to dry-run contract instantiations. + #[method(name = "contracts_instantiate")] + fn instantiate( + &self, + instantiate_request: InstantiateRequest, + at: Option, + ) -> RpcResult>; + + /// Upload new code without instantiating a contract from it. + /// + /// This upload is performed locally without submitting any transactions. Thus executing this + /// won't change any state. + /// + /// This method is useful for UIs to dry-run code upload. + #[method(name = "contracts_upload_code")] + fn upload_code( + &self, + upload_request: CodeUploadRequest, + at: Option, + ) -> RpcResult>; + + /// Returns the value under a specified storage `key` in a contract given by `address` param, + /// or `None` if it is not set. + #[method(name = "contracts_getStorage")] + fn get_storage( + &self, + address: AccountId, + key: Bytes, + at: Option, + ) -> RpcResult>; +} + +/// Contracts RPC methods. +pub struct Contracts { + client: Arc, + _marker: PhantomData, +} + +impl Contracts { + /// Create new `Contracts` with the given reference to the client. 
+ pub fn new(client: Arc) -> Self { + Self { client, _marker: Default::default() } + } +} + +#[async_trait] +impl + ContractsApiServer< + ::Hash, + <::Header as HeaderT>::Number, + AccountId, + Balance, + Hash, + > for Contracts +where + Block: BlockT, + Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + Client::Api: ContractsRuntimeApi< + Block, + AccountId, + Balance, + <::Header as HeaderT>::Number, + Hash, + >, + AccountId: Codec, + Balance: Codec + Copy + TryFrom + Into, + Hash: Codec, +{ + fn call( + &self, + call_request: CallRequest, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash)); + + let CallRequest { origin, dest, value, gas_limit, storage_deposit_limit, input_data } = + call_request; + + let value: Balance = decode_hex(value, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + let storage_deposit_limit: Option = + storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; + limit_gas(gas_limit)?; + + api.call(&at, origin, dest, value, gas_limit, storage_deposit_limit, input_data.to_vec()) + .map_err(runtime_error_into_rpc_err) + } + + fn instantiate( + &self, + instantiate_request: InstantiateRequest, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. 
+ self.client.info().best_hash)); + + let InstantiateRequest { + origin, + value, + gas_limit, + storage_deposit_limit, + code, + data, + salt, + } = instantiate_request; + + let value: Balance = decode_hex(value, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + let storage_deposit_limit: Option = + storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; + limit_gas(gas_limit)?; + + api.instantiate( + &at, + origin, + value, + gas_limit, + storage_deposit_limit, + code, + data.to_vec(), + salt.to_vec(), + ) + .map_err(runtime_error_into_rpc_err) + } + + fn upload_code( + &self, + upload_request: CodeUploadRequest, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash)); + + let CodeUploadRequest { origin, code, storage_deposit_limit } = upload_request; + + let storage_deposit_limit: Option = + storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; + + api.upload_code(&at, origin, code.to_vec(), storage_deposit_limit) + .map_err(runtime_error_into_rpc_err) + } + + fn get_storage( + &self, + address: AccountId, + key: Bytes, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + let result = api + .get_storage(&at, address, key.to_vec()) + .map_err(runtime_error_into_rpc_err)? + .map_err(ContractAccessError)? + .map(Bytes); + + Ok(result) + } +} + +/// Converts a runtime trap into an RPC error. 
+fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> JsonRpseeError { + CallError::Custom(ErrorObject::owned( + RUNTIME_ERROR, + "Runtime error", + Some(format!("{:?}", err)), + )) + .into() +} + +fn decode_hex>(from: H, name: &str) -> RpcResult { + from.try_into().map_err(|_| { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("{:?} does not fit into the {} type", from, name), + None::<()>, + ))) + }) +} + +fn limit_gas(gas_limit: Weight) -> RpcResult<()> { + if gas_limit > GAS_LIMIT { + Err(JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!( + "Requested gas limit is greater than maximum allowed: {} > {}", + gas_limit, GAS_LIMIT + ), + None::<()>, + )))) + } else { + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use pallet_contracts_primitives::{ContractExecResult, ContractInstantiateResult}; + use sp_core::U256; + + fn trim(json: &str) -> String { + json.chars().filter(|c| !c.is_whitespace()).collect() + } + + #[test] + fn call_request_should_serialize_deserialize_properly() { + type Req = CallRequest; + let req: Req = serde_json::from_str( + r#" + { + "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", + "value": "0x112210f4B16c1cb1", + "gasLimit": 1000000000000, + "storageDepositLimit": 5000, + "inputData": "0x8c97db39" + } + "#, + ) + .unwrap(); + assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); + assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); + assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); + } + + #[test] + fn instantiate_request_should_serialize_deserialize_properly() { + type Req = InstantiateRequest; + let req: Req = serde_json::from_str( + r#" + { + "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "value": "0x88", + "gasLimit": 42, + "code": { "existing": 
"0x1122" }, + "data": "0x4299", + "salt": "0x9988" + } + "#, + ) + .unwrap(); + + assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); + assert_eq!(req.value.into_u256(), 0x88.into()); + assert_eq!(req.gas_limit.into_u256(), 42.into()); + assert_eq!(req.storage_deposit_limit, None); + assert_eq!(&*req.data, [0x42, 0x99].as_ref()); + assert_eq!(&*req.salt, [0x99, 0x88].as_ref()); + let code = match req.code { + Code::Existing(hash) => hash, + _ => panic!("json encoded an existing hash"), + }; + assert_eq!(&code, "0x1122"); + } + + #[test] + fn code_upload_request_should_serialize_deserialize_properly() { + type Req = CodeUploadRequest; + let req: Req = serde_json::from_str( + r#" + { + "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "code": "0x8c97db39", + "storageDepositLimit": 5000 + } + "#, + ) + .unwrap(); + assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); + assert_eq!(&*req.code, [0x8c, 0x97, 0xdb, 0x39].as_ref()); + assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); + } + + #[test] + fn call_result_should_serialize_deserialize_properly() { + fn test(expected: &str) { + let res: ContractExecResult = serde_json::from_str(expected).unwrap(); + let actual = serde_json::to_string(&res).unwrap(); + assert_eq!(actual, trim(expected).as_str()); + } + test( + r#"{ + "gasConsumed": 5000, + "gasRequired": 8000, + "storageDeposit": {"charge": 42000}, + "debugMessage": "HelloWorld", + "result": { + "Ok": { + "flags": 5, + "data": "0x1234" + } + } + }"#, + ); + test( + r#"{ + "gasConsumed": 3400, + "gasRequired": 5200, + "storageDeposit": {"refund": 12000}, + "debugMessage": "HelloWorld", + "result": { + "Err": "BadOrigin" + } + }"#, + ); + } + + #[test] + fn instantiate_result_should_serialize_deserialize_properly() { + fn test(expected: &str) { + let res: ContractInstantiateResult = + serde_json::from_str(expected).unwrap(); + let actual = serde_json::to_string(&res).unwrap(); 
+ assert_eq!(actual, trim(expected).as_str()); + } + test( + r#"{ + "gasConsumed": 5000, + "gasRequired": 8000, + "storageDeposit": {"refund": 12000}, + "debugMessage": "HelloWorld", + "result": { + "Ok": { + "result": { + "flags": 5, + "data": "0x1234" + }, + "accountId": "5CiPP" + } + } + }"#, + ); + test( + r#"{ + "gasConsumed": 3400, + "gasRequired": 5200, + "storageDeposit": {"charge": 0}, + "debugMessage": "HelloWorld", + "result": { + "Err": "BadOrigin" + } + }"#, + ); + } + + #[test] + fn code_upload_result_should_serialize_deserialize_properly() { + fn test(expected: &str) { + let res: CodeUploadResult = serde_json::from_str(expected).unwrap(); + let actual = serde_json::to_string(&res).unwrap(); + assert_eq!(actual, trim(expected).as_str()); + } + test( + r#"{ + "Ok": { + "codeHash": 4711, + "deposit": 99 + } + }"#, + ); + test( + r#"{ + "Err": "BadOrigin" + }"#, + ); + } +} diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index b14b107f34c90..5f9b43d3e3b7a 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -24,7 +24,7 @@ //! we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. 
-use crate::{Config, Determinism}; +use crate::Config; use frame_support::traits::Get; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; @@ -195,7 +195,7 @@ where for func in def.imported_functions { let sig = builder::signature() .with_params(func.params) - .with_results(func.return_type) + .with_results(func.return_type.into_iter().collect()) .build_sig(); let sig = contract.push_signature(sig); contract = contract @@ -254,9 +254,9 @@ where code = inject_stack_metering::(code); } - let code = code.into_bytes().unwrap(); + let code = code.to_bytes().unwrap(); let hash = T::Hashing::hash(&code); - Self { code: code.into(), hash, memory: def.memory } + Self { code, hash, memory: def.memory } } } @@ -285,11 +285,11 @@ where .find_map(|e| if let External::Memory(mem) = e.external() { Some(mem) } else { None }) .unwrap() .limits(); - let code = module.into_bytes().unwrap(); + let code = module.to_bytes().unwrap(); let hash = T::Hashing::hash(&code); let memory = ImportedMemory { min_pages: limits.initial(), max_pages: limits.maximum().unwrap() }; - Self { code: code.into(), hash, memory: Some(memory) } + Self { code, hash, memory: Some(memory) } } /// Creates a wasm module with an empty `call` and `deploy` function and nothing else. 
@@ -554,7 +554,7 @@ where fn inject_gas_metering(module: Module) -> Module { let schedule = T::Schedule::get(); - let gas_rules = schedule.rules(&module, Determinism::Deterministic); + let gas_rules = schedule.rules(&module); wasm_instrument::gas_metering::inject(module, &gas_rules, "seal0").unwrap() } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 5465e720dc197..bea469bd0f5a9 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -57,7 +57,7 @@ const INSTR_BENCHMARK_BATCHES: u32 = 50; struct Contract { caller: T::AccountId, account_id: T::AccountId, - addr: AccountIdLookupOf, + addr: ::Source, value: BalanceOf, } @@ -87,7 +87,7 @@ where module: WasmModule, data: Vec, ) -> Result, &'static str> { - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let salt = vec![0xff]; let addr = Contracts::::contract_address(&caller, &module.hash, &salt); @@ -257,7 +257,7 @@ benchmarks! { let instance = Contract::::with_caller( whitelisted_caller(), WasmModule::sized(c, Location::Deploy), vec![], )?; - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr; }: call(origin, callee, value, Weight::MAX, None, vec![]) @@ -280,7 +280,7 @@ benchmarks! { let c in 0 .. Perbill::from_percent(49).mul_ceil(T::MaxCodeLen::get()); let s in 0 .. code::max_pages::() * 64 * 1024; let salt = vec![42u8; s as usize]; - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); @@ -307,7 +307,7 @@ benchmarks! { instantiate { let s in 0 .. 
code::max_pages::() * 64 * 1024; let salt = vec![42u8; s as usize]; - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy(); @@ -338,7 +338,7 @@ benchmarks! { let instance = Contract::::with_caller( whitelisted_caller(), WasmModule::dummy(), vec![], )?; - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); let before = T::Currency::free_balance(&instance.account_id); @@ -371,7 +371,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); let origin = RawOrigin::Signed(caller.clone()); - }: _(origin, code, None, Determinism::Deterministic) + }: _(origin, code, None) verify { // uploading the code reserves some balance in the callers account assert!(T::Currency::reserved_balance(&caller) > 0u32.into()); @@ -386,7 +386,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); - let uploaded = >::bare_upload_code(caller.clone(), code, None, Determinism::Deterministic)?; + let uploaded = >::bare_upload_code(caller.clone(), code, None)?; assert_eq!(uploaded.code_hash, hash); assert_eq!(uploaded.deposit, T::Currency::reserved_balance(&caller)); assert!(>::code_exists(&hash)); @@ -618,11 +618,11 @@ benchmarks! { imported_functions: vec![ImportedFunction { module: "seal0", name: "gas", - params: vec![ValueType::I64], + params: vec![ValueType::I32], return_type: None, }], call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ - Instruction::I64Const(42), + Instruction::I32Const(42), Instruction::Call(0), ])), .. 
Default::default() @@ -767,13 +767,13 @@ benchmarks! { let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); - assert_eq!(T::Currency::free_balance(&instance.account_id), Pallet::::min_balance()); + assert_eq!(T::Currency::free_balance(&instance.account_id), T::Currency::minimum_balance()); assert_ne!(T::Currency::reserved_balance(&instance.account_id), 0u32.into()); }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) verify { if r > 0 { assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!(T::Currency::total_balance(&beneficiary), Pallet::::min_balance()); + assert_eq!(T::Currency::total_balance(&beneficiary), T::Currency::minimum_balance()); } } @@ -919,8 +919,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", + module: "__unstable__", + name: "seal_set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -967,8 +967,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", + module: "__unstable__", + name: "seal_set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1015,8 +1015,8 @@ benchmarks! 
{ let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", + module: "__unstable__", + name: "seal_set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1067,8 +1067,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", - name: "clear_storage", + module: "__unstable__", + name: "seal_clear_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1114,8 +1114,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", - name: "clear_storage", + module: "__unstable__", + name: "seal_clear_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1162,8 +1162,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", - name: "get_storage", + module: "__unstable__", + name: "seal_get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1216,8 +1216,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", - name: "get_storage", + module: "__unstable__", + name: "seal_get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1271,8 +1271,8 @@ benchmarks! 
{ let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", - name: "contains_storage", + module: "__unstable__", + name: "seal_contains_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1318,8 +1318,8 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "seal1", - name: "contains_storage", + module: "__unstable__", + name: "seal_contains_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1367,7 +1367,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "take_storage", + name: "seal_take_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1421,7 +1421,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "take_storage", + name: "seal_take_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1469,7 +1469,7 @@ benchmarks! { .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1705,7 +1705,7 @@ benchmarks! 
{ let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); let hashes_len = hashes_bytes.len(); - let value = Pallet::::min_balance(); + let value = T::Currency::minimum_balance(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1890,7 +1890,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_sha2_256 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_sha2_256", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1908,7 +1908,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_keccak_256 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_keccak_256", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1926,7 +1926,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_blake2_256 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_256", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1944,7 +1944,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_blake2_128 { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_128", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1963,7 +1963,7 @@ benchmarks! { // Only calling the function itself with valid arguments. // It generates different private keys and signatures for the message "Hello world". seal_ecdsa_recover { - let r in 0 .. 1; + let r in 0 .. 
API_BENCHMARK_BATCHES; let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes()); let key_type = sp_core::crypto::KeyTypeId(*b"code"); @@ -2011,7 +2011,7 @@ benchmarks! { // Only calling the function itself for the list of // generated different ECDSA keys. seal_ecdsa_to_eth_address { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_BATCHES; let key_type = sp_core::crypto::KeyTypeId(*b"code"); let pub_keys_bytes = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|_| { @@ -2853,8 +2853,8 @@ benchmarks! { println!("{:#?}", Schedule::::default()); println!("###############################################"); println!("Lazy deletion throughput per block (empty queue, full queue): {}, {}", - weight_limit / weight_per_key.ref_time(), - (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key.ref_time(), + weight_limit / weight_per_key, + (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key, ); } #[cfg(not(feature = "std"))] @@ -2894,7 +2894,6 @@ benchmarks! { None, data, false, - Determinism::Deterministic, ) .result?; } @@ -2942,7 +2941,6 @@ benchmarks! { None, data, false, - Determinism::Deterministic, ) .result?; } diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index d0e0cf5cf95cb..23242a2a542c1 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -38,7 +38,7 @@ //! //! However, only extensions implementing [`RegisteredChainExtension`] can be put into a tuple. //! This is because the [`RegisteredChainExtension::ID`] is used to decide which of those extensions -//! should be used when the contract calls a chain extensions. Extensions which are generally +//! should should be used when the contract calls a chain extensions. Extensions which are generally //! useful should claim their `ID` with [the registry](https://github.com/paritytech/chainextension-registry) //! so that no collisions with other vendors will occur. //! 
@@ -84,6 +84,7 @@ pub use crate::{exec::Ext, Config}; pub use frame_system::Config as SysConfig; pub use pallet_contracts_primitives::ReturnFlags; pub use sp_core::crypto::UncheckedFrom; +pub use state::Init as InitState; /// Result that returns a [`DispatchError`] on error. pub type Result = sp_std::result::Result; @@ -197,7 +198,7 @@ pub enum RetVal { /// /// It uses [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html) /// to enforce the correct usage of the parameters passed to the chain extension. -pub struct Environment<'a, 'b, E: Ext, S: State> { +pub struct Environment<'a, 'b, E: Ext, S: state::State> { /// The actual data of this type. inner: Inner<'a, 'b, E>, /// `S` is only used in the type system but never as value. @@ -205,7 +206,7 @@ pub struct Environment<'a, 'b, E: Ext, S: State> { } /// Functions that are available in every state of this type. -impl<'a, 'b, E: Ext, S: State> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: state::State> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -214,7 +215,7 @@ where /// It returns the two least significant bytes of the `id` passed by a contract as the other /// two bytes represent the chain extension itself (the code which is calling this function). pub fn func_id(&self) -> u16 { - (self.inner.id & 0x0000FFFF) as u16 + (self.inner.id & 0x00FF) as u16 } /// The chain extension id within the `id` passed by a contract. @@ -238,7 +239,7 @@ where /// /// Weight is synonymous with gas in substrate. pub fn charge_weight(&mut self, amount: Weight) -> Result { - self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount.ref_time())) + self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount)) } /// Adjust a previously charged amount down to its actual amount. 
@@ -248,7 +249,7 @@ where pub fn adjust_weight(&mut self, charged: ChargedAmount, actual_weight: Weight) { self.inner .runtime - .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight.ref_time())) + .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight)) } /// Grants access to the execution environment of the current contract call. @@ -263,7 +264,7 @@ where /// /// Those are the functions that determine how the arguments to the chain extensions /// should be consumed. -impl<'a, 'b, E: Ext> Environment<'a, 'b, E, InitState> { +impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { /// Creates a new environment for consumption by a chain extension. /// /// It is only available to this crate because only the wasm runtime module needs to @@ -283,23 +284,23 @@ impl<'a, 'b, E: Ext> Environment<'a, 'b, E, InitState> { } /// Use all arguments as integer values. - pub fn only_in(self) -> Environment<'a, 'b, E, OnlyInState> { + pub fn only_in(self) -> Environment<'a, 'b, E, state::OnlyIn> { Environment { inner: self.inner, phantom: PhantomData } } /// Use input arguments as integer and output arguments as pointer to a buffer. - pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, PrimInBufOutState> { + pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, state::PrimInBufOut> { Environment { inner: self.inner, phantom: PhantomData } } /// Use input and output arguments as pointers to a buffer. - pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, BufInBufOutState> { + pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, state::BufInBufOut> { Environment { inner: self.inner, phantom: PhantomData } } } /// Functions to use the input arguments as integers. -impl<'a, 'b, E: Ext, S: PrimIn> Environment<'a, 'b, E, S> { +impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { /// The `input_ptr` argument. 
pub fn val0(&self) -> u32 { self.inner.input_ptr @@ -312,7 +313,7 @@ impl<'a, 'b, E: Ext, S: PrimIn> Environment<'a, 'b, E, S> { } /// Functions to use the output arguments as integers. -impl<'a, 'b, E: Ext, S: PrimOut> Environment<'a, 'b, E, S> { +impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { /// The `output_ptr` argument. pub fn val2(&self) -> u32 { self.inner.output_ptr @@ -325,7 +326,7 @@ impl<'a, 'b, E: Ext, S: PrimOut> Environment<'a, 'b, E, S> { } /// Functions to use the input arguments as pointer to a buffer. -impl<'a, 'b, E: Ext, S: BufIn> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: state::BufIn> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -388,7 +389,7 @@ where } /// Functions to use the output arguments as pointer to a buffer. -impl<'a, 'b, E: Ext, S: BufOut> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: state::BufOut> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -411,8 +412,7 @@ where buffer, allow_skip, |len| { - weight_per_byte - .map(|w| RuntimeCosts::ChainExtension(w.ref_time().saturating_mul(len.into()))) + weight_per_byte.map(|w| RuntimeCosts::ChainExtension(w.saturating_mul(len.into()))) }, ) } @@ -438,54 +438,31 @@ struct Inner<'a, 'b, E: Ext> { output_len_ptr: u32, } -/// Any state of an [`Environment`] implements this trait. -/// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). -pub trait State: sealed::Sealed {} - -/// A state that uses primitive inputs. -pub trait PrimIn: State {} - -/// A state that uses primitive outputs. -pub trait PrimOut: State {} - -/// A state that uses a buffer as input. -pub trait BufIn: State {} - -/// A state that uses a buffer as output. -pub trait BufOut: State {} - -/// The initial state of an [`Environment`]. -pub enum InitState {} - -/// A state that uses all arguments as primitive inputs. 
-pub enum OnlyInState {} - -/// A state that uses two arguments as primitive inputs and the other two as buffer output. -pub enum PrimInBufOutState {} - -/// Uses a buffer for input and a buffer for output. -pub enum BufInBufOutState {} - -mod sealed { - use super::*; - - /// Trait to prevent users from implementing `State` for anything else. - pub trait Sealed {} - - impl Sealed for InitState {} - impl Sealed for OnlyInState {} - impl Sealed for PrimInBufOutState {} - impl Sealed for BufInBufOutState {} - - impl State for InitState {} - impl State for OnlyInState {} - impl State for PrimInBufOutState {} - impl State for BufInBufOutState {} - - impl PrimIn for OnlyInState {} - impl PrimOut for OnlyInState {} - impl PrimIn for PrimInBufOutState {} - impl BufOut for PrimInBufOutState {} - impl BufIn for BufInBufOutState {} - impl BufOut for BufInBufOutState {} +/// Private submodule with public types to prevent other modules from naming them. +mod state { + pub trait State {} + + pub trait PrimIn: State {} + pub trait PrimOut: State {} + pub trait BufIn: State {} + pub trait BufOut: State {} + + /// The initial state of an [`Environment`](`super::Environment`). + /// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). 
+ pub enum Init {} + pub enum OnlyIn {} + pub enum PrimInBufOut {} + pub enum BufInBufOut {} + + impl State for Init {} + impl State for OnlyIn {} + impl State for PrimInBufOut {} + impl State for BufInBufOut {} + + impl PrimIn for OnlyIn {} + impl PrimOut for OnlyIn {} + impl PrimIn for PrimInBufOut {} + impl BufOut for PrimInBufOut {} + impl BufIn for BufInBufOut {} + impl BufOut for BufInBufOut {} } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 7955f076b84c4..5ca74e681e5dd 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -18,7 +18,7 @@ use crate::{ gas::GasMeter, storage::{self, Storage, WriteOutcome}, - BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, Determinism, Error, Event, Nonce, + BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, Error, Event, Nonce, Pallet as Contracts, Schedule, }; use frame_support::{ @@ -34,7 +34,7 @@ use pallet_contracts_primitives::ExecReturnValue; use smallvec::{Array, SmallVec}; use sp_core::{crypto::UncheckedFrom, ecdsa::Public as ECDSAPublic}; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; -use sp_runtime::traits::{Convert, Hash}; +use sp_runtime::traits::Convert; use sp_std::{marker::PhantomData, mem, prelude::*}; pub type AccountIdOf = ::AccountId; @@ -282,7 +282,7 @@ pub trait Ext: sealing::Sealed { fn append_debug_buffer(&mut self, msg: &str) -> bool; /// Call some dispatchable and return the result. - fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo; + fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo; /// Recovers ECDSA compressed public key based on signature and message hash. fn ecdsa_recover(&self, signature: &[u8; 65], message_hash: &[u8; 32]) -> Result<[u8; 33], ()>; @@ -355,9 +355,6 @@ pub trait Executable: Sized { /// Size of the instrumented code in bytes. 
fn code_len(&self) -> u32; - - /// The code does not contain any instructions which could lead to indeterminism. - fn is_deterministic(&self) -> bool; } /// The complete call stack of a contract execution. @@ -398,8 +395,6 @@ pub struct Stack<'a, T: Config, E> { /// All the bytes added to this field should be valid UTF-8. The buffer has no defined /// structure and is intended to be shown to users as-is for debugging purposes. debug_message: Option<&'a mut Vec>, - /// The determinism requirement of this call stack. - determinism: Determinism, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -606,7 +601,6 @@ where value: BalanceOf, input_data: Vec, debug_message: Option<&'a mut Vec>, - determinism: Determinism, ) -> Result { let (mut stack, executable) = Self::new( FrameArgs::Call { dest, cached_info: None, delegated_call: None }, @@ -616,7 +610,6 @@ where schedule, value, debug_message, - determinism, )?; stack.run(executable, input_data) } @@ -655,7 +648,6 @@ where schedule, value, debug_message, - Determinism::Deterministic, )?; let account_id = stack.top_frame().account_id.clone(); stack.run(executable, input_data).map(|ret| (account_id, ret)) @@ -670,17 +662,9 @@ where schedule: &'a Schedule, value: BalanceOf, debug_message: Option<&'a mut Vec>, - determinism: Determinism, ) -> Result<(Self, E), ExecError> { - let (first_frame, executable, nonce) = Self::new_frame( - args, - value, - gas_meter, - storage_meter, - Weight::zero(), - schedule, - determinism, - )?; + let (first_frame, executable, nonce) = + Self::new_frame(args, value, gas_meter, storage_meter, 0, schedule)?; let stack = Self { origin, schedule, @@ -692,7 +676,6 @@ where first_frame, frames: Default::default(), debug_message, - determinism, _phantom: Default::default(), }; @@ -710,7 +693,6 @@ where storage_meter: &mut storage::meter::GenericMeter, gas_limit: Weight, schedule: &Schedule, - determinism: Determinism, ) -> Result<(Frame, E, Option), 
ExecError> { let (account_id, contract_info, executable, delegate_caller, entry_point, nonce) = match frame_args { @@ -747,15 +729,6 @@ where }, }; - // `AllowIndeterminism` will only be ever set in case of off-chain execution. - // Instantiations are never allowed even when executing off-chain. - if !(executable.is_deterministic() || - (matches!(determinism, Determinism::AllowIndeterminism) && - matches!(entry_point, ExportedFunction::Call))) - { - return Err(Error::::Indeterministic.into()) - } - let frame = Frame { delegate_caller, value_transferred, @@ -802,7 +775,6 @@ where nested_storage, gas_limit, self.schedule, - self.determinism, )?; self.frames.push(frame); Ok(executable) @@ -812,19 +784,16 @@ where /// /// This can be either a call or an instantiate. fn run(&mut self, executable: E, input_data: Vec) -> Result { - let frame = self.top_frame(); - let entry_point = frame.entry_point; - let delegated_code_hash = - if frame.delegate_caller.is_some() { Some(*executable.code_hash()) } else { None }; + let entry_point = self.top_frame().entry_point; let do_transaction = || { // We need to charge the storage deposit before the initial transfer so that // it can create the account in case the initial transfer is < ed. if entry_point == ExportedFunction::Constructor { - let frame = top_frame_mut!(self); - frame.nested_storage.charge_instantiate( + let top_frame = top_frame_mut!(self); + top_frame.nested_storage.charge_instantiate( &self.origin, - &frame.account_id, - frame.contract_info.get(&frame.account_id), + &top_frame.account_id, + top_frame.contract_info.get(&top_frame.account_id), )?; } @@ -836,51 +805,23 @@ where .execute(self, &entry_point, input_data) .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; - // Avoid useless work that would be reverted anyways. - if output.did_revert() { - return Ok(output) - } + // Additional work needs to be performed in case of an instantiation. 
+ if !output.did_revert() && entry_point == ExportedFunction::Constructor { + let frame = self.top_frame(); - // Storage limit is enforced as late as possible (when the last frame returns) so that - // the ordering of storage accesses does not matter. - if self.frames.is_empty() { - let frame = &mut self.first_frame; - frame.contract_info.load(&frame.account_id); - let contract = frame.contract_info.as_contract(); - frame.nested_storage.enforce_limit(contract)?; - } + // It is not allowed to terminate a contract inside its constructor. + if matches!(frame.contract_info, CachedContract::Terminated) { + return Err(Error::::TerminatedInConstructor.into()) + } - let frame = self.top_frame(); - let account_id = &frame.account_id; - match (entry_point, delegated_code_hash) { - (ExportedFunction::Constructor, _) => { - // It is not allowed to terminate a contract inside its constructor. - if matches!(frame.contract_info, CachedContract::Terminated) { - return Err(Error::::TerminatedInConstructor.into()) - } - - // Deposit an instantiation event. - Contracts::::deposit_event( - vec![T::Hashing::hash_of(self.caller()), T::Hashing::hash_of(account_id)], - Event::Instantiated { - deployer: self.caller().clone(), - contract: account_id.clone(), - }, - ); - }, - (ExportedFunction::Call, Some(code_hash)) => { - Contracts::::deposit_event( - vec![T::Hashing::hash_of(account_id), T::Hashing::hash_of(&code_hash)], - Event::DelegateCalled { contract: account_id.clone(), code_hash }, - ); - }, - (ExportedFunction::Call, None) => { - let caller = self.caller(); - Contracts::::deposit_event( - vec![T::Hashing::hash_of(caller), T::Hashing::hash_of(account_id)], - Event::Called { caller: caller.clone(), contract: account_id.clone() }, - ); - }, + // Deposit an instantiation event. + deposit_event::( + vec![], + Event::Instantiated { + deployer: self.caller().clone(), + contract: frame.account_id.clone(), + }, + ); } Ok(output) @@ -909,7 +850,6 @@ where // has changed. 
Err(error) => (false, Err(error.into())), }; - self.pop_frame(success); output } @@ -948,7 +888,12 @@ where // it was invalidated. frame.contract_info.load(account_id); let mut contract = frame.contract_info.into_contract(); - prev.nested_storage.absorb(frame.nested_storage, account_id, contract.as_mut()); + prev.nested_storage.absorb( + frame.nested_storage, + &self.origin, + account_id, + contract.as_mut(), + ); // In case the contract wasn't terminated we need to persist changes made to it. if let Some(contract) = contract { @@ -986,6 +931,7 @@ where let mut contract = self.first_frame.contract_info.as_contract(); self.storage_meter.absorb( mem::take(&mut self.first_frame.nested_storage), + &self.origin, &self.first_frame.account_id, contract.as_deref_mut(), ); @@ -1143,7 +1089,7 @@ where delegated_call: Some(DelegatedCall { executable, caller: self.caller().clone() }), }, value, - Weight::zero(), + 0, )?; self.run(executable, input_data) } @@ -1188,13 +1134,10 @@ where )?; ContractInfoOf::::remove(&frame.account_id); E::remove_user(info.code_hash); - Contracts::::deposit_event( - vec![T::Hashing::hash_of(&frame.account_id), T::Hashing::hash_of(&beneficiary)], - Event::Terminated { - contract: frame.account_id.clone(), - beneficiary: beneficiary.clone(), - }, - ); + Contracts::::deposit_event(Event::Terminated { + contract: frame.account_id.clone(), + beneficiary: beneficiary.clone(), + }); Ok(()) } @@ -1299,7 +1242,7 @@ where } fn deposit_event(&mut self, topics: Vec, data: Vec) { - Contracts::::deposit_event( + deposit_event::( topics, Event::ContractEmitted { contract: self.top_frame().account_id.clone(), data }, ); @@ -1336,8 +1279,8 @@ where } } - fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo { - let mut origin: T::RuntimeOrigin = RawOrigin::Signed(self.address().clone()).into(); + fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo { + let mut origin: T::Origin = RawOrigin::Signed(self.address().clone()).into(); 
origin.add_filter(T::CallFilter::contains); call.dispatch(origin) } @@ -1356,26 +1299,27 @@ where } fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { - let frame = top_frame_mut!(self); - if !E::from_storage(hash, self.schedule, &mut frame.nested_gas)?.is_deterministic() { - return Err(>::Indeterministic.into()) - } E::add_user(hash)?; - let prev_hash = frame.contract_info().code_hash; + let top_frame = self.top_frame_mut(); + let prev_hash = top_frame.contract_info().code_hash; E::remove_user(prev_hash); - frame.contract_info().code_hash = hash; - Contracts::::deposit_event( - vec![T::Hashing::hash_of(&frame.account_id), hash, prev_hash], - Event::ContractCodeUpdated { - contract: frame.account_id.clone(), - new_code_hash: hash, - old_code_hash: prev_hash, - }, - ); + top_frame.contract_info().code_hash = hash; + Contracts::::deposit_event(Event::ContractCodeUpdated { + contract: top_frame.account_id.clone(), + new_code_hash: hash, + old_code_hash: prev_hash, + }); Ok(()) } } +fn deposit_event(topics: Vec, event: Event) { + >::deposit_event_indexed( + &topics, + ::Event::from(event).into(), + ) +} + mod sealing { use super::*; @@ -1403,18 +1347,19 @@ mod tests { gas::GasMeter, storage::Storage, tests::{ - test_utils::{get_balance, hash, place_contract, set_balance}, - ExtBuilder, RuntimeCall, RuntimeEvent as MetaEvent, Test, TestFilter, ALICE, BOB, - CHARLIE, GAS_LIMIT, + test_utils::{get_balance, place_contract, set_balance}, + Call, Event as MetaEvent, ExtBuilder, Test, TestFilter, ALICE, BOB, CHARLIE, GAS_LIMIT, }, Error, }; use assert_matches::assert_matches; use codec::{Decode, Encode}; - use frame_support::{assert_err, assert_ok, parameter_types}; + use frame_support::{assert_err, assert_ok}; use frame_system::{EventRecord, Phase}; + use hex_literal::hex; use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; + use sp_core::Bytes; use sp_runtime::{traits::Hash, DispatchError}; use std::{ cell::RefCell, @@ 
-1426,8 +1371,8 @@ mod tests { type MockStack<'a> = Stack<'a, Test, MockExecutable>; - parameter_types! { - static Loader: MockLoader = MockLoader::default(); + thread_local! { + static LOADER: RefCell = RefCell::new(MockLoader::default()); } fn events() -> Vec> { @@ -1453,8 +1398,8 @@ mod tests { refcount: u64, } - #[derive(Default, Clone)] - pub struct MockLoader { + #[derive(Default)] + struct MockLoader { map: HashMap, MockExecutable>, counter: u64, } @@ -1464,20 +1409,27 @@ mod tests { func_type: ExportedFunction, f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static, ) -> CodeHash { - Loader::mutate(|loader| { + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); // Generate code hashes as monotonically increasing values. let hash = ::Hash::from_low_u64_be(loader.counter); loader.counter += 1; loader.map.insert( hash, - MockExecutable { func: Rc::new(f), func_type, code_hash: hash, refcount: 1 }, + MockExecutable { + func: Rc::new(f), + func_type, + code_hash: hash.clone(), + refcount: 1, + }, ); hash }) } fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { - Loader::mutate(|loader| { + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); match loader.map.entry(code_hash) { Entry::Vacant(_) => Err(>::CodeNotFound)?, Entry::Occupied(mut entry) => entry.get_mut().refcount += 1, @@ -1488,7 +1440,8 @@ mod tests { fn decrement_refcount(code_hash: CodeHash) { use std::collections::hash_map::Entry::Occupied; - Loader::mutate(|loader| { + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); let mut entry = match loader.map.entry(code_hash) { Occupied(e) => e, _ => panic!("code_hash does not exist"), @@ -1508,8 +1461,13 @@ mod tests { _schedule: &Schedule, _gas_meter: &mut GasMeter, ) -> Result { - Loader::mutate(|loader| { - loader.map.get(&code_hash).cloned().ok_or(Error::::CodeNotFound.into()) + LOADER.with(|loader| { + loader + .borrow_mut() + .map + .get(&code_hash) + .cloned() + 
.ok_or(Error::::CodeNotFound.into()) }) } @@ -1544,14 +1502,10 @@ mod tests { fn code_len(&self) -> u32 { 0 } - - fn is_deterministic(&self) -> bool { - true - } } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) } fn exec_trapped() -> ExecResult { @@ -1560,14 +1514,14 @@ mod tests { #[test] fn it_works() { - parameter_types! { - static TestData: Vec = vec![0]; + thread_local! { + static TEST_DATA: RefCell> = RefCell::new(vec![0]); } let value = Default::default(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let exec_ch = MockLoader::insert(Call, |_ctx, _executable| { - TestData::mutate(|data| data.push(1)); + TEST_DATA.with(|data| data.borrow_mut().push(1)); exec_success() }); @@ -1586,13 +1540,12 @@ mod tests { value, vec![], None, - Determinism::Deterministic, ), Ok(_) ); }); - assert_eq!(TestData::get(), vec![0, 1]); + TEST_DATA.with(|data| assert_eq!(*data.borrow(), vec![0, 1])); } #[test] @@ -1621,7 +1574,7 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) }); ExtBuilder::default().build().execute_with(|| { @@ -1640,7 +1593,6 @@ mod tests { value, vec![], None, - Determinism::Deterministic, ) .unwrap(); @@ -1657,13 +1609,13 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) }); let delegate_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; - Ok(ExecReturnValue { flags: 
ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) }); ExtBuilder::default().build().execute_with(|| { @@ -1682,7 +1634,6 @@ mod tests { value, vec![], None, - Determinism::Deterministic, ) .unwrap(); @@ -1699,7 +1650,7 @@ mod tests { let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) }); ExtBuilder::default().build().execute_with(|| { @@ -1718,7 +1669,6 @@ mod tests { 55, vec![], None, - Determinism::Deterministic, ) .unwrap(); @@ -1753,7 +1703,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) }); ExtBuilder::default().build().execute_with(|| { @@ -1770,12 +1720,11 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ); let output = result.unwrap(); assert!(!output.did_revert()); - assert_eq!(output.data, vec![1, 2, 3, 4]); + assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1786,7 +1735,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) }); ExtBuilder::default().build().execute_with(|| { @@ -1803,12 +1752,11 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ); let output = result.unwrap(); assert!(output.did_revert()); - assert_eq!(output.data, vec![1, 2, 3, 4]); + assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1834,7 +1782,6 @@ mod tests { 0, vec![1, 2, 3, 4], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -1877,15 +1824,16 @@ mod tests { 
fn max_depth() { // This test verifies that when we reach the maximal depth creation of an // yet another context fails. - parameter_types! { - static ReachedBottom: bool = false; + thread_local! { + static REACHED_BOTTOM: RefCell = RefCell::new(false); } let value = Default::default(); let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. - let r = ctx.ext.call(Weight::zero(), BOB, 0, vec![], true); + let r = ctx.ext.call(0, BOB, 0, vec![], true); - ReachedBottom::mutate(|reached_bottom| { + REACHED_BOTTOM.with(|reached_bottom| { + let mut reached_bottom = reached_bottom.borrow_mut(); if !*reached_bottom { // We are first time here, it means we just reached bottom. // Verify that we've got proper error and set `reached_bottom`. @@ -1915,7 +1863,6 @@ mod tests { value, vec![], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); @@ -1927,22 +1874,24 @@ mod tests { let origin = ALICE; let dest = BOB; - parameter_types! { - static WitnessedCallerBob: Option> = None; - static WitnessedCallerCharlie: Option> = None; + thread_local! { + static WITNESSED_CALLER_BOB: RefCell>> = RefCell::new(None); + static WITNESSED_CALLER_CHARLIE: RefCell>> = RefCell::new(None); } let bob_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for bob. - WitnessedCallerBob::mutate(|caller| *caller = Some(ctx.ext.caller().clone())); + WITNESSED_CALLER_BOB + .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. - assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for charlie. 
- WitnessedCallerCharlie::mutate(|caller| *caller = Some(ctx.ext.caller().clone())); + WITNESSED_CALLER_CHARLIE + .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); exec_success() }); @@ -1961,14 +1910,13 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); - assert_eq!(WitnessedCallerBob::get(), Some(origin)); - assert_eq!(WitnessedCallerCharlie::get(), Some(dest)); + WITNESSED_CALLER_BOB.with(|caller| assert_eq!(*caller.borrow(), Some(origin))); + WITNESSED_CALLER_CHARLIE.with(|caller| assert_eq!(*caller.borrow(), Some(dest))); } #[test] @@ -1995,7 +1943,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2025,7 +1972,6 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2053,7 +1999,6 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2071,7 +2016,7 @@ mod tests { // ALICE is the origin of the call stack assert!(ctx.ext.caller_is_origin()); // BOB calls CHARLIE - ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true) + ctx.ext.call(0, CHARLIE, 0, vec![], true) }); ExtBuilder::default().build().execute_with(|| { @@ -2089,7 +2034,6 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2102,7 +2046,7 @@ mod tests { assert_eq!(*ctx.ext.address(), BOB); // Call into charlie contract. 
- assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -2125,7 +2069,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); @@ -2163,7 +2106,7 @@ mod tests { #[test] fn instantiation_work_with_success_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2188,7 +2131,7 @@ mod tests { &[], None, ), - Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address + Ok((address, ref output)) if output.data == Bytes(vec![80, 65, 83, 83]) => address ); // Check that the newly created account has the expected code hash and @@ -2207,7 +2150,7 @@ mod tests { #[test] fn instantiation_fails_with_failing_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2232,7 +2175,7 @@ mod tests { &[], None, ), - Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address + Ok((address, ref output)) if output.data == Bytes(vec![70, 65, 73, 76]) => address ); // Check that the account has not been created. 
@@ -2246,13 +2189,14 @@ mod tests { let dummy_ch = MockLoader::insert(Call, |_, _| exec_success()); let instantiated_contract_address = Rc::new(RefCell::new(None::>)); let instantiator_ch = MockLoader::insert(Call, { + let dummy_ch = dummy_ch.clone(); let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. let (address, output) = ctx .ext .instantiate( - Weight::zero(), + 0, dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2284,7 +2228,6 @@ mod tests { min_balance * 10, vec![], None, - Determinism::Deterministic, ), Ok(_) ); @@ -2300,10 +2243,7 @@ mod tests { ); assert_eq!( &events(), - &[ - Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }, - Event::Called { caller: ALICE, contract: BOB }, - ] + &[Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }] ); }); } @@ -2312,11 +2252,12 @@ mod tests { fn instantiation_traps() { let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into())); let instantiator_ch = MockLoader::insert(Call, { + let dummy_ch = dummy_ch.clone(); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. assert_matches!( ctx.ext.instantiate( - Weight::zero(), + 0, dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2349,14 +2290,13 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ), Ok(_) ); // The contract wasn't instantiated so we don't expect to see an instantiation // event here. 
- assert_eq!(&events(), &[Event::Called { caller: ALICE, contract: BOB },]); + assert_eq!(&events(), &[]); }); } @@ -2407,15 +2347,15 @@ mod tests { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { let info = ctx.ext.contract_info(); - assert_eq!(info.storage_byte_deposit, 0); - info.storage_byte_deposit = 42; - assert_eq!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), exec_trapped()); - assert_eq!(ctx.ext.contract_info().storage_byte_deposit, 42); + assert_eq!(info.storage_deposit, 0); + info.storage_deposit = 42; + assert_eq!(ctx.ext.call(0, CHARLIE, 0, vec![], true), exec_trapped()); + assert_eq!(ctx.ext.contract_info().storage_deposit, 42); } exec_success() }); let code_charlie = MockLoader::insert(Call, |ctx, _| { - assert!(ctx.ext.call(Weight::zero(), BOB, 0, vec![99], true).is_ok()); + assert!(ctx.ext.call(0, BOB, 0, vec![99], true).is_ok()); exec_trapped() }); @@ -2435,7 +2375,6 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2445,7 +2384,7 @@ mod tests { fn recursive_call_during_constructor_fails() { let code = MockLoader::insert(Constructor, |ctx, _| { assert_matches!( - ctx.ext.call(Weight::zero(), ctx.ext.address().clone(), 0, vec![], true), + ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![], true), Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() @@ -2502,7 +2441,6 @@ mod tests { 0, vec![], Some(&mut debug_buffer), - Determinism::Deterministic, ) .unwrap(); }); @@ -2536,7 +2474,6 @@ mod tests { 0, vec![], Some(&mut debug_buffer), - Determinism::Deterministic, ); assert!(result.is_err()); }); @@ -2549,7 +2486,7 @@ mod tests { // call the contract passed as input with disabled reentry let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); - ctx.ext.call(Weight::zero(), dest, 0, vec![], false) + ctx.ext.call(0, dest, 0, vec![], false) }); let code_charlie = 
MockLoader::insert(Call, |_, _| exec_success()); @@ -2570,7 +2507,6 @@ mod tests { 0, CHARLIE.encode(), None, - Determinism::Deterministic )); // Calling into oneself fails @@ -2584,7 +2520,6 @@ mod tests { 0, BOB.encode(), None, - Determinism::Deterministic ) .map_err(|e| e.error), >::ReentranceDenied, @@ -2596,7 +2531,7 @@ mod tests { fn call_deny_reentry() { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { - ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], false) + ctx.ext.call(0, CHARLIE, 0, vec![], false) } else { exec_success() } @@ -2604,7 +2539,7 @@ mod tests { // call BOB with input set to '1' let code_charlie = - MockLoader::insert(Call, |ctx, _| ctx.ext.call(Weight::zero(), BOB, 0, vec![1], true)); + MockLoader::insert(Call, |ctx, _| ctx.ext.call(0, BOB, 0, vec![1], true)); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -2623,7 +2558,6 @@ mod tests { 0, vec![0], None, - Determinism::Deterministic ) .map_err(|e| e.error), >::ReentranceDenied, @@ -2634,7 +2568,7 @@ mod tests { #[test] fn call_runtime_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { - let call = RuntimeCall::System(frame_system::Call::remark_with_event { + let call = Call::System(frame_system::Call::remark_with_event { remark: b"Hello World".to_vec(), }); ctx.ext.call_runtime(call).unwrap(); @@ -2658,31 +2592,20 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ) .unwrap(); let remark_hash = ::Hashing::hash(b"Hello World"); assert_eq!( System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::System(frame_system::Event::Remarked { - sender: BOB, - hash: remark_hash - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: BOB, - }), - topics: vec![hash(&ALICE), hash(&BOB)], - }, - ] + vec![EventRecord { + phase: Phase::Initialization, + event: 
MetaEvent::System(frame_system::Event::Remarked { + sender: BOB, + hash: remark_hash + }), + topics: vec![], + },] ); }); } @@ -2696,11 +2619,10 @@ mod tests { // remark should still be allowed let allowed_call = - RuntimeCall::System(SysCall::remark_with_event { remark: b"Hello".to_vec() }); + Call::System(SysCall::remark_with_event { remark: b"Hello".to_vec() }); // transfers are disallowed by the `TestFiler` (see below) - let forbidden_call = - RuntimeCall::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); + let forbidden_call = Call::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); // simple cases: direct call assert_err!( @@ -2709,7 +2631,7 @@ mod tests { ); // as part of a patch: return is OK (but it interrupted the batch) - assert_ok!(ctx.ext.call_runtime(RuntimeCall::Utility(UtilCall::batch { + assert_ok!(ctx.ext.call_runtime(Call::Utility(UtilCall::batch { calls: vec![allowed_call.clone(), forbidden_call, allowed_call] })),); @@ -2720,7 +2642,7 @@ mod tests { }); TestFilter::set_filter(|call| match call { - RuntimeCall::Balances(pallet_balances::Call::transfer { .. }) => false, + Call::Balances(pallet_balances::Call::transfer { .. 
}) => false, _ => true, }); @@ -2741,7 +2663,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ) .unwrap(); @@ -2770,14 +2691,6 @@ mod tests { },), topics: vec![], }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: BOB, - }), - topics: vec![hash(&ALICE), hash(&BOB)], - }, ] ); }); @@ -2789,30 +2702,18 @@ mod tests { let success_code = MockLoader::insert(Constructor, |_, _| exec_success()); let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| { ctx.ext - .instantiate( - Weight::zero(), - fail_code, - ctx.ext.minimum_balance() * 100, - vec![], - &[], - ) + .instantiate(0, fail_code, ctx.ext.minimum_balance() * 100, vec![], &[]) .ok(); exec_success() }); let succ_succ_code = MockLoader::insert(Constructor, move |ctx, _| { let (account_id, _) = ctx .ext - .instantiate( - Weight::zero(), - success_code, - ctx.ext.minimum_balance() * 100, - vec![], - &[], - ) + .instantiate(0, success_code, ctx.ext.minimum_balance() * 100, vec![], &[]) .unwrap(); // a plain call should not influence the account counter - ctx.ext.call(Weight::zero(), account_id, 0, vec![], false).unwrap(); + ctx.ext.call(0, account_id, 0, vec![], false).unwrap(); exec_success() }); @@ -2945,7 +2846,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic )); }); } @@ -3072,7 +2972,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic )); }); } @@ -3108,7 +3007,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic )); }); } @@ -3144,7 +3042,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic )); }); } @@ -3206,7 +3103,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic )); }); } @@ -3268,7 +3164,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic )); }); } @@ -3276,12 +3171,13 @@ mod tests { #[test] fn ecdsa_to_eth_address_returns_proper_value() { let bob_ch = MockLoader::insert(Call, |ctx, _| { - let pubkey_compressed = 
array_bytes::hex2array_unchecked( - "028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd91", - ); + let pubkey_compressed: [u8; 33] = + hex!("028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd91")[..] + .try_into() + .unwrap(); assert_eq!( ctx.ext.ecdsa_to_eth_address(&pubkey_compressed).unwrap(), - array_bytes::hex2array_unchecked::<20>("09231da7b19A016f9e576d23B16277062F4d46A8") + hex!("09231da7b19A016f9e576d23B16277062F4d46A8")[..] ); exec_success() }); @@ -3300,7 +3196,6 @@ mod tests { 0, vec![], None, - Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index c0cc2db2aa3eb..41df125da0170 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -107,45 +107,32 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { + let amount = if amount == 0 { self.gas_left } else { amount }; + // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - let amount = Weight::from_parts( - if amount.ref_time().is_zero() { - self.gas_left().ref_time() - } else { - amount.ref_time() - }, - if amount.proof_size().is_zero() { - self.gas_left().proof_size() - } else { - amount.proof_size() - }, - ); - self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| >::OutOfGas)?; - Ok(GasMeter::new(amount)) + if self.gas_left < amount { + Err(>::OutOfGas.into()) + } else { + self.gas_left -= amount; + Ok(GasMeter::new(amount)) + } } /// Absorb the remaining gas of a nested meter after we are done using it. pub fn absorb_nested(&mut self, nested: Self) { - if self.gas_left.ref_time().is_zero() { + if self.gas_left == 0 { // All of the remaining gas was inherited by the nested gas meter. 
When absorbing // we can therefore safely inherit the lowest gas that the nested gas meter experienced // as long as it is lower than the lowest gas that was experienced by the parent. // We cannot call `self.gas_left_lowest()` here because in the state that this // code is run the parent gas meter has `0` gas left. - *self.gas_left_lowest.ref_time_mut() = - nested.gas_left_lowest().ref_time().min(self.gas_left_lowest.ref_time()); + self.gas_left_lowest = nested.gas_left_lowest().min(self.gas_left_lowest); } else { // The nested gas meter was created with a fixed amount that did not consume all of the // parents (self) gas. The lowest gas that self will experience is when the nested // gas was pre charged with the fixed amount. - *self.gas_left_lowest.ref_time_mut() = self.gas_left_lowest().ref_time(); - } - if self.gas_left.proof_size().is_zero() { - *self.gas_left_lowest.proof_size_mut() = - nested.gas_left_lowest().proof_size().min(self.gas_left_lowest.proof_size()); - } else { - *self.gas_left_lowest.proof_size_mut() = self.gas_left_lowest().proof_size(); + self.gas_left_lowest = self.gas_left_lowest(); } self.gas_left += nested.gas_left; } @@ -168,11 +155,17 @@ where ErasedToken { description: format!("{:?}", token), token: Box::new(token) }; self.tokens.push(erased_tok); } + let amount = token.weight(); - // It is OK to not charge anything on failure because we always charge _before_ we perform - // any action - self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| Error::::OutOfGas)?; - Ok(ChargedAmount(amount)) + let new_value = self.gas_left.checked_sub(amount); + + // We always consume the gas even if there is not enough gas. + self.gas_left = new_value.unwrap_or_else(Zero::zero); + + match new_value { + Some(_) => Ok(ChargedAmount(amount)), + None => Err(Error::::OutOfGas.into()), + } } /// Adjust a previously charged amount down to its actual amount. 
@@ -234,7 +227,7 @@ where #[cfg(test)] mod tests { - use super::{GasMeter, Token, Weight}; + use super::{GasMeter, Token}; use crate::tests::Test; /// A simple utility macro that helps to match against a @@ -278,20 +271,20 @@ mod tests { #[derive(Copy, Clone, PartialEq, Eq, Debug)] struct SimpleToken(u64); impl Token for SimpleToken { - fn weight(&self) -> Weight { - Weight::from_ref_time(self.0) + fn weight(&self) -> u64 { + self.0 } } #[test] fn it_works() { - let gas_meter = GasMeter::::new(Weight::from_ref_time(50000)); - assert_eq!(gas_meter.gas_left(), Weight::from_ref_time(50000)); + let gas_meter = GasMeter::::new(50000); + assert_eq!(gas_meter.gas_left(), 50000); } #[test] fn tracing() { - let mut gas_meter = GasMeter::::new(Weight::from_ref_time(50000)); + let mut gas_meter = GasMeter::::new(50000); assert!(!gas_meter.charge(SimpleToken(1)).is_err()); let mut tokens = gas_meter.tokens().iter(); @@ -301,27 +294,31 @@ mod tests { // This test makes sure that nothing can be executed if there is no gas. #[test] fn refuse_to_execute_anything_if_zero() { - let mut gas_meter = GasMeter::::new(Weight::zero()); + let mut gas_meter = GasMeter::::new(0); assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that the gas meter does not charge in case of overcharger + // Make sure that if the gas meter is charged by exceeding amount then not only an error + // returned for that charge, but also for all consequent charges. + // + // This is not strictly necessary, because the execution should be interrupted immediately + // if the gas meter runs out of gas. However, this is just a nice property to have. #[test] - fn overcharge_does_not_charge() { - let mut gas_meter = GasMeter::::new(Weight::from_ref_time(200)); + fn overcharge_is_unrecoverable() { + let mut gas_meter = GasMeter::::new(200); // The first charge is should lead to OOG. assert!(gas_meter.charge(SimpleToken(300)).is_err()); - // The gas meter should still contain the full 200. 
- assert!(gas_meter.charge(SimpleToken(200)).is_ok()); + // The gas meter is emptied at this moment, so this should also fail. + assert!(gas_meter.charge(SimpleToken(1)).is_err()); } // Charging the exact amount that the user paid for should be // possible. #[test] fn charge_exact_amount() { - let mut gas_meter = GasMeter::::new(Weight::from_ref_time(25)); + let mut gas_meter = GasMeter::::new(25); assert!(!gas_meter.charge(SimpleToken(25)).is_err()); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 52fb0190ba3a9..319bacaab7789 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -87,12 +87,12 @@ mod gas; mod benchmarking; mod exec; -mod migration; mod schedule; mod storage; mod wasm; pub mod chain_extension; +pub mod migration; pub mod weights; #[cfg(test)] @@ -105,34 +105,29 @@ use crate::{ wasm::{OwnerInfo, PrefabWasmModule}, weights::WeightInfo, }; -use codec::{Codec, Encode, HasCompact}; +use codec::{Encode, HasCompact}; use frame_support::{ - dispatch::{Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, + dispatch::Dispatchable, ensure, - traits::{ - tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, - ReservableCurrency, Time, - }, - weights::{OldWeight, Weight}, - BoundedVec, WeakBoundedVec, + traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, + weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, Weight}, + BoundedVec, }; -use frame_system::Pallet as System; +use frame_system::{limits::BlockWeights, Pallet as System}; use pallet_contracts_primitives::{ Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, ContractInstantiateResult, ExecReturnValue, GetStorageResult, InstantiateReturnValue, StorageDeposit, }; use scale_info::TypeInfo; -use sp_core::crypto::UncheckedFrom; +use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; use 
sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; pub use crate::{ exec::{Frame, VarSizedKey as StorageKey}, - migration::Migration, pallet::*, schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, - wasm::Determinism, }; type CodeHash = ::Hash; @@ -140,8 +135,7 @@ type TrieId = BoundedVec>; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type CodeVec = BoundedVec::MaxCodeLen>; -type RelaxedCodeVec = WeakBoundedVec::MaxCodeLen>; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type RelaxedCodeVec = BoundedVec::RelaxedMaxCodeLen>; /// Used as a sentinel value when reading and writing contract memory. /// @@ -200,6 +194,29 @@ where } } +/// A conservative implementation to be used for [`pallet::Config::ContractAccessWeight`]. +/// +/// This derives the weight from the [`BlockWeights`] passed as `B` and the `maxPovSize` passed +/// as `P`. The default value for `P` is the `maxPovSize` used by Polkadot and Kusama. +/// +/// It simply charges from the weight meter pro rata: If loading the contract code would consume +/// 50% of the max storage proof then this charges 50% of the max block weight. +pub struct DefaultContractAccessWeight, const P: u32 = 5_242_880>( + PhantomData, +); + +impl, const P: u32> Get for DefaultContractAccessWeight { + fn get() -> Weight { + let block_weights = B::get(); + block_weights + .per_class + .get(DispatchClass::Normal) + .max_total + .unwrap_or(block_weights.max_block) / + Weight::from(P) + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -207,7 +224,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(7); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -218,21 +235,20 @@ pub mod pallet { /// The time implementation used to supply timestamps to contracts through `seal_now`. 
type Time: Time; - /// The generator used to supply randomness to contracts through `seal_random` + /// The generator used to supply randomness to contracts through `seal_random`. type Randomness: Randomness; /// The currency in which fees are paid and contract balances are held. - type Currency: ReservableCurrency - + Inspect>; + type Currency: ReservableCurrency; /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The overarching call type. - type RuntimeCall: Dispatchable + type Call: Dispatchable + GetDispatchInfo + codec::Decode - + IsType<::RuntimeCall>; + + IsType<::Call>; /// Filter that is applied to calls dispatched by contracts. /// @@ -243,7 +259,7 @@ pub mod pallet { /// # Stability /// /// The runtime **must** make sure that all dispatchables that are callable by - /// contracts remain stable. In addition [`Self::RuntimeCall`] itself must remain stable. + /// contracts remain stable. In addition [`Self::Call`] itself must remain stable. /// This means that no existing variants are allowed to switch their positions. /// /// # Note @@ -253,7 +269,7 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. - type CallFilter: Contains<::RuntimeCall>; + type CallFilter: Contains<::Call>; /// Used to answer contracts' queries regarding the current weight price. This is **not** /// used to calculate the actual fee and is only for informational purposes. @@ -312,6 +328,27 @@ pub mod pallet { #[pallet::constant] type DepositPerByte: Get>; + /// The weight per byte of code that is charged when loading a contract from storage. + /// + /// Currently, FRAME only charges fees for computation incurred but not for PoV + /// consumption caused for storage access. 
This is usually not exploitable because + /// accessing storage carries some substantial weight costs, too. However in case + /// of contract code very much PoV consumption can be caused while consuming very little + /// computation. This could be used to keep the chain busy without paying the + /// proper fee for it. Until this is resolved we charge from the weight meter for + /// contract access. + /// + /// For more information check out: + /// + /// [`DefaultContractAccessWeight`] is a safe default to be used for Polkadot or Kusama + /// parachains. + /// + /// # Note + /// + /// This is only relevant for parachains. Set to zero in case of a standalone chain. + #[pallet::constant] + type ContractAccessWeight: Get; + /// The amount of balance a caller has to pay for each storage item. /// /// # Note @@ -328,6 +365,15 @@ pub mod pallet { /// a wasm binary below this maximum size. type MaxCodeLen: Get; + /// The maximum length of a contract code after reinstrumentation. + /// + /// When uploading a new contract the size defined by [`Self::MaxCodeLen`] is used for both + /// the pristine **and** the instrumented version. When a existing contract needs to be + /// reinstrumented after a runtime upgrade we apply this bound. The reason is that if the + /// new instrumentation increases the size beyond the limit it would make that contract + /// inaccessible until rectified by another runtime upgrade. + type RelaxedMaxCodeLen: Get; + /// The maximum allowable length in bytes for storage keys. type MaxStorageKeyLen: Get; } @@ -370,169 +416,6 @@ pub mod pallet { T::AccountId: AsRef<[u8]>, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, { - /// Deprecated version if [`Self::call`] for use in an in-storage `Call`. 
- #[pallet::weight(T::WeightInfo::call().saturating_add(>::compat_weight(*gas_limit)))] - #[allow(deprecated)] - #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] - pub fn call_old_weight( - origin: OriginFor, - dest: AccountIdLookupOf, - #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: OldWeight, - storage_deposit_limit: Option< as codec::HasCompact>::Type>, - data: Vec, - ) -> DispatchResultWithPostInfo { - Self::call( - origin, - dest, - value, - >::compat_weight(gas_limit), - storage_deposit_limit, - data, - ) - } - - /// Deprecated version if [`Self::instantiate_with_code`] for use in an in-storage `Call`. - #[pallet::weight( - T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add(>::compat_weight(*gas_limit)) - )] - #[allow(deprecated)] - #[deprecated( - note = "1D weight is used in this extrinsic, please migrate to `instantiate_with_code`" - )] - pub fn instantiate_with_code_old_weight( - origin: OriginFor, - #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: OldWeight, - storage_deposit_limit: Option< as codec::HasCompact>::Type>, - code: Vec, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo { - Self::instantiate_with_code( - origin, - value, - >::compat_weight(gas_limit), - storage_deposit_limit, - code, - data, - salt, - ) - } - - /// Deprecated version if [`Self::instantiate`] for use in an in-storage `Call`. 
- #[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add(>::compat_weight(*gas_limit)) - )] - #[allow(deprecated)] - #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] - pub fn instantiate_old_weight( - origin: OriginFor, - #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: OldWeight, - storage_deposit_limit: Option< as codec::HasCompact>::Type>, - code_hash: CodeHash, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo { - Self::instantiate( - origin, - value, - >::compat_weight(gas_limit), - storage_deposit_limit, - code_hash, - data, - salt, - ) - } - - /// Upload new `code` without instantiating a contract from it. - /// - /// If the code does not already exist a deposit is reserved from the caller - /// and unreserved only when [`Self::remove_code`] is called. The size of the reserve - /// depends on the instrumented size of the the supplied `code`. - /// - /// If the code already exists in storage it will still return `Ok` and upgrades - /// the in storage version to the current - /// [`InstructionWeights::version`](InstructionWeights). - /// - /// - `determinism`: If this is set to any other value but [`Determinism::Deterministic`] - /// then the only way to use this code is to delegate call into it from an offchain - /// execution. Set to [`Determinism::Deterministic`] if in doubt. - /// - /// # Note - /// - /// Anyone can instantiate a contract from any uploaded code and thus prevent its removal. - /// To avoid this situation a constructor could employ access control so that it can - /// only be instantiated by permissioned entities. The same is true when uploading - /// through [`Self::instantiate_with_code`]. 
- #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] - pub fn upload_code( - origin: OriginFor, - code: Vec, - storage_deposit_limit: Option< as codec::HasCompact>::Type>, - determinism: Determinism, - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism) - .map(|_| ()) - } - - /// Remove the code stored under `code_hash` and refund the deposit to its owner. - /// - /// A code can only be removed by its original uploader (its owner) and only if it is - /// not used by any contract. - #[pallet::weight(T::WeightInfo::remove_code())] - pub fn remove_code( - origin: OriginFor, - code_hash: CodeHash, - ) -> DispatchResultWithPostInfo { - let origin = ensure_signed(origin)?; - >::remove(&origin, code_hash)?; - // we waive the fee because removing unused code is beneficial - Ok(Pays::No.into()) - } - - /// Privileged function that changes the code of an existing contract. - /// - /// This takes care of updating refcounts and all other necessary operations. Returns - /// an error if either the `code_hash` or `dest` do not exist. - /// - /// # Note - /// - /// This does **not** change the address of the contract in question. This means - /// that the contract address is no longer derived from its code hash after calling - /// this dispatchable. 
- #[pallet::weight(T::WeightInfo::set_code())] - pub fn set_code( - origin: OriginFor, - dest: AccountIdLookupOf, - code_hash: CodeHash, - ) -> DispatchResult { - ensure_root(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::try_mutate(&dest, |contract| { - let contract = if let Some(contract) = contract { - contract - } else { - return Err(>::ContractNotFound.into()) - }; - >::add_user(code_hash)?; - >::remove_user(contract.code_hash); - Self::deposit_event( - vec![T::Hashing::hash_of(&dest), code_hash, contract.code_hash], - Event::ContractCodeUpdated { - contract: dest.clone(), - new_code_hash: code_hash, - old_code_hash: contract.code_hash, - }, - ); - contract.code_hash = code_hash; - Ok(()) - }) - } - /// Makes a call to an account, optionally transferring some balance. /// /// # Parameters @@ -552,13 +435,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor, - dest: AccountIdLookupOf, + dest: ::Source, #[pallet::compact] value: BalanceOf, - gas_limit: Weight, + #[pallet::compact] gas_limit: Weight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut output = Self::internal_call( @@ -569,7 +451,6 @@ pub mod pallet { storage_deposit_limit.map(Into::into), data, None, - Determinism::Deterministic, ); if let Ok(retval) = &output.result { if retval.did_revert() { @@ -612,7 +493,7 @@ pub mod pallet { pub fn instantiate_with_code( origin: OriginFor, #[pallet::compact] value: BalanceOf, - gas_limit: Weight, + #[pallet::compact] gas_limit: Weight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code: Vec, data: Vec, @@ -626,7 +507,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(code), + Code::Upload(Bytes(code)), data, salt, None, @@ -653,7 +534,7 @@ pub mod 
pallet { pub fn instantiate( origin: OriginFor, #[pallet::compact] value: BalanceOf, - gas_limit: Weight, + #[pallet::compact] gas_limit: Weight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code_hash: CodeHash, data: Vec, @@ -681,9 +562,87 @@ pub mod pallet { T::WeightInfo::instantiate(salt_len), ) } + + /// Upload new `code` without instantiating a contract from it. + /// + /// If the code does not already exist a deposit is reserved from the caller + /// and unreserved only when [`Self::remove_code`] is called. The size of the reserve + /// depends on the instrumented size of the the supplied `code`. + /// + /// If the code already exists in storage it will still return `Ok` and upgrades + /// the in storage version to the current + /// [`InstructionWeights::version`](InstructionWeights). + /// + /// # Note + /// + /// Anyone can instantiate a contract from any uploaded code and thus prevent its removal. + /// To avoid this situation a constructor could employ access control so that it can + /// only be instantiated by permissioned entities. The same is true when uploading + /// through [`Self::instantiate_with_code`]. + #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] + pub fn upload_code( + origin: OriginFor, + code: Vec, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into)).map(|_| ()) + } + + /// Remove the code stored under `code_hash` and refund the deposit to its owner. + /// + /// A code can only be removed by its original uploader (its owner) and only if it is + /// not used by any contract. 
+ #[pallet::weight(T::WeightInfo::remove_code())] + pub fn remove_code( + origin: OriginFor, + code_hash: CodeHash, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + >::remove(&origin, code_hash)?; + // we waive the fee because removing unused code is beneficial + Ok(Pays::No.into()) + } + + /// Privileged function that changes the code of an existing contract. + /// + /// This takes care of updating refcounts and all other necessary operations. Returns + /// an error if either the `code_hash` or `dest` do not exist. + /// + /// # Note + /// + /// This does **not** change the address of the contract in question. This means + /// that the contract address is no longer derived from its code hash after calling + /// this dispatchable. + #[pallet::weight(T::WeightInfo::set_code())] + pub fn set_code( + origin: OriginFor, + dest: ::Source, + code_hash: CodeHash, + ) -> DispatchResult { + ensure_root(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::try_mutate(&dest, |contract| { + let contract = if let Some(contract) = contract { + contract + } else { + return Err(>::ContractNotFound.into()) + }; + >::add_user(code_hash)?; + >::remove_user(contract.code_hash); + Self::deposit_event(Event::ContractCodeUpdated { + contract: dest.clone(), + new_code_hash: code_hash, + old_code_hash: contract.code_hash, + }); + contract.code_hash = code_hash; + Ok(()) + }) + } } #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Contract deployed by address at the specified address. Instantiated { deployer: T::AccountId, contract: T::AccountId }, @@ -725,35 +684,6 @@ pub mod pallet { /// Previous code hash of the contract. old_code_hash: T::Hash, }, - - /// A contract was called either by a plain account or another contract. - /// - /// # Note - /// - /// Please keep in mind that like all events this is only emitted for successful - /// calls. 
This is because on failure all storage changes including events are - /// rolled back. - Called { - /// The account that called the `contract`. - caller: T::AccountId, - /// The contract that was called. - contract: T::AccountId, - }, - - /// A contract delegate called a code hash. - /// - /// # Note - /// - /// Please keep in mind that like all events this is only emitted for successful - /// calls. This is because on failure all storage changes including events are - /// rolled back. - DelegateCalled { - /// The contract that performed the delegate call and hence in whose context - /// the `code_hash` is executed. - contract: T::AccountId, - /// The code hash that was delegate called. - code_hash: CodeHash, - }, } #[pallet::error] @@ -833,8 +763,6 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// or in the debug buffer which is returned to RPC clients. CodeRejected, - /// An indetermistic code was used in a context where this is not permitted. - Indeterministic, } /// A mapping from an original code hash to the original code, untouched by instrumentation. 
@@ -931,7 +859,6 @@ where storage_deposit_limit: Option>, data: Vec, debug: bool, - determinism: Determinism, ) -> ContractExecResult> { let mut debug_message = if debug { Some(Vec::new()) } else { None }; let output = Self::internal_call( @@ -942,7 +869,6 @@ where storage_deposit_limit, data, debug_message.as_mut(), - determinism, ); ContractExecResult { result: output.result.map_err(|r| r.error), @@ -1006,11 +932,10 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: Option>, - determinism: Determinism, ) -> CodeUploadResult, BalanceOf> { let schedule = T::Schedule::get(); - let module = PrefabWasmModule::from_code(code, &schedule, origin, determinism) - .map_err(|(err, _)| err)?; + let module = + PrefabWasmModule::from_code(code, &schedule, origin).map_err(|(err, _)| err)?; let deposit = module.open_deposit(); if let Some(storage_deposit_limit) = storage_deposit_limit { ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); @@ -1044,11 +969,6 @@ where T::AddressGenerator::generate_address(deploying_address, code_hash, salt) } - /// Returns the code hash of the contract specified by `account` ID. - pub fn code_hash(account: &AccountIdOf) -> Option> { - Storage::::code_hash(account) - } - /// Store code for benchmarks which does not check nor instrument the code. 
#[cfg(feature = "runtime-benchmarks")] fn store_code_raw( @@ -1080,7 +1000,6 @@ where storage_deposit_limit: Option>, data: Vec, debug_message: Option<&mut Vec>, - determinism: Determinism, ) -> InternalCallOutput { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_meter = match StorageMeter::new(&origin, storage_deposit_limit, value) { @@ -1094,7 +1013,7 @@ where }; let schedule = T::Schedule::get(); let result = ExecStack::>::run_call( - origin.clone(), + origin, dest, &mut gas_meter, &mut storage_meter, @@ -1102,13 +1021,8 @@ where value, data, debug_message, - determinism, ); - InternalCallOutput { - result, - gas_meter, - storage_deposit: storage_meter.into_deposit(&origin), - } + InternalCallOutput { result, gas_meter, storage_deposit: storage_meter.into_deposit() } } /// Internal function that does the actual instantiation. @@ -1129,17 +1043,12 @@ where let try_exec = || { let schedule = T::Schedule::get(); let (extra_deposit, executable) = match code { - Code::Upload(binary) => { - let executable = PrefabWasmModule::from_code( - binary, - &schedule, - origin.clone(), - Determinism::Deterministic, - ) - .map_err(|(err, msg)| { - debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); - err - })?; + Code::Upload(Bytes(binary)) => { + let executable = PrefabWasmModule::from_code(binary, &schedule, origin.clone()) + .map_err(|(err, msg)| { + debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); + err + })?; // The open deposit will be charged during execution when the // uploaded module does not already exist. 
This deposit is not part of the // storage meter because it is not transferred to the contract but @@ -1157,7 +1066,7 @@ where value.saturating_add(extra_deposit), )?; let result = ExecStack::>::run_instantiate( - origin.clone(), + origin, executable, &mut gas_meter, &mut storage_meter, @@ -1168,88 +1077,10 @@ where debug_message, ); storage_deposit = storage_meter - .into_deposit(&origin) + .into_deposit() .saturating_add(&StorageDeposit::Charge(extra_deposit)); result }; InternalInstantiateOutput { result: try_exec(), gas_meter, storage_deposit } } - - /// Deposit a pallet contracts event. Handles the conversion to the overarching event type. - fn deposit_event(topics: Vec, event: Event) { - >::deposit_event_indexed( - &topics, - ::RuntimeEvent::from(event).into(), - ) - } - - /// Return the existential deposit of [`Config::Currency`]. - fn min_balance() -> BalanceOf { - >>::minimum_balance() - } - - /// Convert a 1D Weight to a 2D weight. - /// - /// Used by backwards compatible extrinsics. We cannot just set the proof to zero - /// or an old `Call` will just fail. - fn compat_weight(gas_limit: OldWeight) -> Weight { - Weight::from(gas_limit).set_proof_size(u64::from(T::MaxCodeLen::get()) * 2) - } -} - -sp_api::decl_runtime_apis! { - /// The API used to dry-run contract interactions. - #[api_version(2)] - pub trait ContractsApi where - AccountId: Codec, - Balance: Codec, - BlockNumber: Codec, - Hash: Codec, - { - /// Perform a call from a specified account to a given contract. - /// - /// See [`crate::Pallet::bare_call`]. - fn call( - origin: AccountId, - dest: AccountId, - value: Balance, - gas_limit: Option, - storage_deposit_limit: Option, - input_data: Vec, - ) -> ContractExecResult; - - /// Instantiate a new contract. - /// - /// See `[crate::Pallet::bare_instantiate]`. 
- fn instantiate( - origin: AccountId, - value: Balance, - gas_limit: Option, - storage_deposit_limit: Option, - code: Code, - data: Vec, - salt: Vec, - ) -> ContractInstantiateResult; - - - /// Upload new code without instantiating a contract from it. - /// - /// See [`crate::Pallet::bare_upload_code`]. - fn upload_code( - origin: AccountId, - code: Vec, - storage_deposit_limit: Option, - determinism: Determinism, - ) -> CodeUploadResult; - - /// Query a given storage key in a given contract. - /// - /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the - /// specified account and `Ok(None)` if it doesn't. If the account specified by the address - /// doesn't exist, or doesn't have a contract then `Err` is returned. - fn get_storage( - address: AccountId, - key: Vec, - ) -> GetStorageResult; - } } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index aa04d8b9b1084..19e699a855461 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -18,79 +18,47 @@ use crate::{BalanceOf, CodeHash, Config, Pallet, TrieId, Weight}; use codec::{Decode, Encode}; use frame_support::{ - codec, - pallet_prelude::*, - storage::migration, - storage_alias, - traits::{Get, OnRuntimeUpgrade}, - Identity, Twox64Concat, + codec, pallet_prelude::*, storage::migration, storage_alias, traits::Get, Identity, + Twox64Concat, }; -use sp_runtime::traits::Saturating; use sp_std::{marker::PhantomData, prelude::*}; -/// Performs all necessary migrations based on `StorageVersion`. -pub struct Migration(PhantomData); -impl OnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - let version = >::on_chain_storage_version(); - let mut weight = Weight::zero(); +/// Wrapper for all migrations of this pallet, based on `StorageVersion`. 
+pub fn migrate() -> Weight { + let version = StorageVersion::get::>(); + let mut weight: Weight = 0; - if version < 4 { - v4::migrate::(&mut weight); - } - - if version < 5 { - v5::migrate::(&mut weight); - } - - if version < 6 { - v6::migrate::(&mut weight); - } - - if version < 7 { - v7::migrate::(&mut weight); - } - - if version < 8 { - v8::migrate::(&mut weight); - } - - if version < 9 { - v9::migrate::(&mut weight); - } - - StorageVersion::new(9).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight + if version < 4 { + weight = weight.saturating_add(v4::migrate::()); + StorageVersion::new(4).put::>(); } - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let version = >::on_chain_storage_version(); - - if version == 8 { - v8::pre_upgrade::()?; - } + if version < 5 { + weight = weight.saturating_add(v5::migrate::()); + StorageVersion::new(5).put::>(); + } - Ok(version.encode()) + if version < 6 { + weight = weight.saturating_add(v6::migrate::()); + StorageVersion::new(6).put::>(); } - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let version = Decode::decode(&mut state.as_ref()).map_err(|_| "Cannot decode version")?; - post_checks::post_upgrade::(version) + if version < 7 { + weight = weight.saturating_add(v7::migrate::()); + StorageVersion::new(7).put::>(); } + + weight } /// V4: `Schedule` is changed to be a config item rather than an in-storage value. 
mod v4 { use super::*; - pub fn migrate(weight: &mut Weight) { + pub fn migrate() -> Weight { #[allow(deprecated)] migration::remove_storage_prefix(>::name().as_bytes(), b"CurrentSchedule", b""); - weight.saturating_accrue(T::DbWeight::get().writes(1)); + T::DbWeight::get().writes(1) } } @@ -158,9 +126,11 @@ mod v5 { #[storage_alias] type DeletionQueue = StorageValue, Vec>; - pub fn migrate(weight: &mut Weight) { + pub fn migrate() -> Weight { + let mut weight: Weight = 0; + >::translate(|_key, old: OldContractInfo| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); match old { OldContractInfo::Alive(old) => Some(ContractInfo:: { trie_id: old.trie_id, @@ -172,10 +142,12 @@ mod v5 { }); DeletionQueue::::translate(|old: Option>| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); old.map(|old| old.into_iter().map(|o| DeletedContract { trie_id: o.trie_id }).collect()) }) .ok(); + + weight } } @@ -199,23 +171,23 @@ mod v6 { } #[derive(Encode, Decode)] - pub struct PrefabWasmModule { + struct PrefabWasmModule { #[codec(compact)] - pub instruction_weights_version: u32, + instruction_weights_version: u32, #[codec(compact)] - pub initial: u32, + initial: u32, #[codec(compact)] - pub maximum: u32, - pub code: Vec, + maximum: u32, + code: Vec, } use v5::ContractInfo as OldContractInfo; #[derive(Encode, Decode)] pub struct RawContractInfo { - pub trie_id: TrieId, - pub code_hash: CodeHash, - pub storage_deposit: Balance, + trie_id: TrieId, + code_hash: CodeHash, + storage_deposit: Balance, } #[derive(Encode, Decode)] @@ -227,7 +199,7 @@ mod v6 { refcount: u64, } - pub type ContractInfo = RawContractInfo, BalanceOf>; + type ContractInfo = RawContractInfo, BalanceOf>; #[storage_alias] type ContractInfoOf = StorageMap< @@ -243,9 +215,11 @@ mod v6 { #[storage_alias] type OwnerInfoOf = StorageMap, 
Identity, CodeHash, OwnerInfo>; - pub fn migrate(weight: &mut Weight) { + pub fn migrate() -> Weight { + let mut weight: Weight = 0; + >::translate(|_key, old: OldContractInfo| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); Some(ContractInfo:: { trie_id: old.trie_id, code_hash: old.code_hash, @@ -257,7 +231,7 @@ mod v6 { .expect("Infinite input; no dead input space; qed"); >::translate(|key, old: OldPrefabWasmModule| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)); >::insert( key, OwnerInfo { @@ -273,6 +247,8 @@ mod v6 { code: old.code, }) }); + + weight } } @@ -280,194 +256,13 @@ mod v6 { mod v7 { use super::*; - pub fn migrate(weight: &mut Weight) { + pub fn migrate() -> Weight { #[storage_alias] type AccountCounter = StorageValue, u64, ValueQuery>; #[storage_alias] type Nonce = StorageValue, u64, ValueQuery>; Nonce::::set(AccountCounter::::take()); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)) - } -} - -/// Update `ContractInfo` with new fields that track storage deposits. 
-mod v8 { - use super::*; - use sp_io::default_child_storage as child; - use v6::ContractInfo as OldContractInfo; - - #[derive(Encode, Decode)] - pub struct ContractInfo { - pub trie_id: TrieId, - pub code_hash: CodeHash, - pub storage_bytes: u32, - pub storage_items: u32, - pub storage_byte_deposit: BalanceOf, - pub storage_item_deposit: BalanceOf, - pub storage_base_deposit: BalanceOf, - } - - #[storage_alias] - type ContractInfoOf = - StorageMap, Twox64Concat, ::AccountId, V>; - - pub fn migrate(weight: &mut Weight) { - >>::translate_values(|old: OldContractInfo| { - // Count storage items of this contract - let mut storage_bytes = 0u32; - let mut storage_items = 0u32; - let mut key = Vec::new(); - while let Some(next) = child::next_key(&old.trie_id, &key) { - key = next; - let mut val_out = []; - let len = child::read(&old.trie_id, &key, &mut val_out, 0) - .expect("The loop conditions checks for existence of the key; qed"); - storage_bytes.saturating_accrue(len); - storage_items.saturating_accrue(1); - } - - let storage_byte_deposit = - T::DepositPerByte::get().saturating_mul(storage_bytes.into()); - let storage_item_deposit = - T::DepositPerItem::get().saturating_mul(storage_items.into()); - let storage_base_deposit = old - .storage_deposit - .saturating_sub(storage_byte_deposit) - .saturating_sub(storage_item_deposit); - - // Reads: One read for each storage item plus the contract info itself. - // Writes: Only the new contract info. 
- weight.saturating_accrue( - T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1), - ); - - Some(ContractInfo { - trie_id: old.trie_id, - code_hash: old.code_hash, - storage_bytes, - storage_items, - storage_byte_deposit, - storage_item_deposit, - storage_base_deposit, - }) - }); - } - - #[cfg(feature = "try-runtime")] - pub fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::ReservableCurrency; - for (key, value) in ContractInfoOf::>::iter() { - let reserved = T::Currency::reserved_balance(&key); - ensure!(reserved >= value.storage_deposit, "Reserved balance out of sync."); - } - Ok(()) - } -} - -/// Update `CodeStorage` with the new `determinism` field. -mod v9 { - use super::*; - use crate::Determinism; - use v6::PrefabWasmModule as OldPrefabWasmModule; - - #[derive(Encode, Decode)] - pub struct PrefabWasmModule { - #[codec(compact)] - pub instruction_weights_version: u32, - #[codec(compact)] - pub initial: u32, - #[codec(compact)] - pub maximum: u32, - pub code: Vec, - pub determinism: Determinism, - } - - #[storage_alias] - type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; - - pub fn migrate(weight: &mut Weight) { - >::translate_values(|old: OldPrefabWasmModule| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - Some(PrefabWasmModule { - instruction_weights_version: old.instruction_weights_version, - initial: old.initial, - maximum: old.maximum, - code: old.code, - determinism: Determinism::Deterministic, - }) - }); - } -} - -// Post checks always need to be run against the latest storage version. This is why we -// do not scope them in the per version modules. They always need to be ported to the latest -// version. 
-#[cfg(feature = "try-runtime")] -mod post_checks { - use super::*; - use crate::Determinism; - use sp_io::default_child_storage as child; - use v8::ContractInfo; - use v9::PrefabWasmModule; - - #[storage_alias] - type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; - - #[storage_alias] - type ContractInfoOf = - StorageMap, Twox64Concat, ::AccountId, V>; - - pub fn post_upgrade(old_version: StorageVersion) -> Result<(), &'static str> { - if old_version < 7 { - return Ok(()) - } - - if old_version < 8 { - v8::()?; - } - - if old_version < 9 { - v9::()?; - } - - Ok(()) - } - - fn v8() -> Result<(), &'static str> { - use frame_support::traits::ReservableCurrency; - for (key, value) in ContractInfoOf::>::iter() { - let reserved = T::Currency::reserved_balance(&key); - let stored = value - .storage_base_deposit - .saturating_add(value.storage_byte_deposit) - .saturating_add(value.storage_item_deposit); - ensure!(reserved >= stored, "Reserved balance out of sync."); - - let mut storage_bytes = 0u32; - let mut storage_items = 0u32; - let mut key = Vec::new(); - while let Some(next) = child::next_key(&value.trie_id, &key) { - key = next; - let mut val_out = []; - let len = child::read(&value.trie_id, &key, &mut val_out, 0) - .expect("The loop conditions checks for existence of the key; qed"); - storage_bytes.saturating_accrue(len); - storage_items.saturating_accrue(1); - } - ensure!(storage_bytes == value.storage_bytes, "Storage bytes do not match.",); - ensure!(storage_items == value.storage_items, "Storage items do not match.",); - } - Ok(()) - } - - fn v9() -> Result<(), &'static str> { - for value in CodeStorage::::iter_values() { - ensure!( - value.determinism == Determinism::Deterministic, - "All pre-existing codes need to be deterministic." 
- ); - } - Ok(()) + T::DbWeight::get().reads_writes(1, 2) } } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 535517e756c61..907ce9e088648 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -18,10 +18,10 @@ //! This module contains the cost schedule and supporting code that constructs a //! sane default schedule from a `WeightInfo` implementation. -use crate::{wasm::Determinism, weights::WeightInfo, Config}; +use crate::{weights::WeightInfo, Config}; use codec::{Decode, Encode}; -use frame_support::DefaultNoBound; +use frame_support::{weights::Weight, DefaultNoBound}; use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -193,13 +193,6 @@ pub struct InstructionWeights { /// Changes to other parts of the schedule should not increment the version in /// order to avoid unnecessary re-instrumentations. pub version: u32, - /// Weight to be used for instructions which don't have benchmarks assigned. - /// - /// This weight is used whenever a code is uploaded with [`Determinism::AllowIndeterminism`] - /// and an instruction (usually a float instruction) is encountered. This weight is **not** - /// used if a contract is uploaded with [`Determinism::Deterministic`]. If this field is set to - /// `0` (the default) only deterministic codes are allowed to be uploaded. - pub fallback: u32, pub i64const: u32, pub i64load: u32, pub i64store: u32, @@ -262,166 +255,166 @@ pub struct InstructionWeights { #[scale_info(skip_type_params(T))] pub struct HostFnWeights { /// Weight of calling `seal_caller`. - pub caller: u64, + pub caller: Weight, /// Weight of calling `seal_is_contract`. - pub is_contract: u64, + pub is_contract: Weight, /// Weight of calling `seal_code_hash`. - pub code_hash: u64, + pub code_hash: Weight, /// Weight of calling `seal_own_code_hash`. 
- pub own_code_hash: u64, + pub own_code_hash: Weight, /// Weight of calling `seal_caller_is_origin`. - pub caller_is_origin: u64, + pub caller_is_origin: Weight, /// Weight of calling `seal_address`. - pub address: u64, + pub address: Weight, /// Weight of calling `seal_gas_left`. - pub gas_left: u64, + pub gas_left: Weight, /// Weight of calling `seal_balance`. - pub balance: u64, + pub balance: Weight, /// Weight of calling `seal_value_transferred`. - pub value_transferred: u64, + pub value_transferred: Weight, /// Weight of calling `seal_minimum_balance`. - pub minimum_balance: u64, + pub minimum_balance: Weight, /// Weight of calling `seal_block_number`. - pub block_number: u64, + pub block_number: Weight, /// Weight of calling `seal_now`. - pub now: u64, + pub now: Weight, /// Weight of calling `seal_weight_to_fee`. - pub weight_to_fee: u64, + pub weight_to_fee: Weight, /// Weight of calling `gas`. - pub gas: u64, + pub gas: Weight, /// Weight of calling `seal_input`. - pub input: u64, + pub input: Weight, /// Weight per input byte copied to contract memory by `seal_input`. - pub input_per_byte: u64, + pub input_per_byte: Weight, /// Weight of calling `seal_return`. - pub r#return: u64, + pub r#return: Weight, /// Weight per byte returned through `seal_return`. - pub return_per_byte: u64, + pub return_per_byte: Weight, /// Weight of calling `seal_terminate`. - pub terminate: u64, + pub terminate: Weight, /// Weight of calling `seal_random`. - pub random: u64, + pub random: Weight, /// Weight of calling `seal_reposit_event`. - pub deposit_event: u64, + pub deposit_event: Weight, /// Weight per topic supplied to `seal_deposit_event`. - pub deposit_event_per_topic: u64, + pub deposit_event_per_topic: Weight, /// Weight per byte of an event deposited through `seal_deposit_event`. - pub deposit_event_per_byte: u64, + pub deposit_event_per_byte: Weight, /// Weight of calling `seal_debug_message`. 
- pub debug_message: u64, + pub debug_message: Weight, /// Weight of calling `seal_set_storage`. - pub set_storage: u64, + pub set_storage: Weight, /// Weight per written byten of an item stored with `seal_set_storage`. - pub set_storage_per_new_byte: u64, + pub set_storage_per_new_byte: Weight, /// Weight per overwritten byte of an item stored with `seal_set_storage`. - pub set_storage_per_old_byte: u64, + pub set_storage_per_old_byte: Weight, /// Weight of calling `seal_set_code_hash`. - pub set_code_hash: u64, + pub set_code_hash: Weight, /// Weight of calling `seal_clear_storage`. - pub clear_storage: u64, + pub clear_storage: Weight, /// Weight of calling `seal_clear_storage` per byte of the stored item. - pub clear_storage_per_byte: u64, + pub clear_storage_per_byte: Weight, /// Weight of calling `seal_contains_storage`. - pub contains_storage: u64, + pub contains_storage: Weight, /// Weight of calling `seal_contains_storage` per byte of the stored item. - pub contains_storage_per_byte: u64, + pub contains_storage_per_byte: Weight, /// Weight of calling `seal_get_storage`. - pub get_storage: u64, + pub get_storage: Weight, /// Weight per byte of an item received via `seal_get_storage`. - pub get_storage_per_byte: u64, + pub get_storage_per_byte: Weight, /// Weight of calling `seal_take_storage`. - pub take_storage: u64, + pub take_storage: Weight, /// Weight per byte of an item received via `seal_take_storage`. - pub take_storage_per_byte: u64, + pub take_storage_per_byte: Weight, /// Weight of calling `seal_transfer`. - pub transfer: u64, + pub transfer: Weight, /// Weight of calling `seal_call`. - pub call: u64, + pub call: Weight, /// Weight of calling `seal_delegate_call`. - pub delegate_call: u64, + pub delegate_call: Weight, /// Weight surcharge that is claimed if `seal_call` does a balance transfer. - pub call_transfer_surcharge: u64, + pub call_transfer_surcharge: Weight, /// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag. 
- pub call_per_cloned_byte: u64, + pub call_per_cloned_byte: Weight, /// Weight of calling `seal_instantiate`. - pub instantiate: u64, + pub instantiate: Weight, /// Weight surcharge that is claimed if `seal_instantiate` does a balance transfer. - pub instantiate_transfer_surcharge: u64, + pub instantiate_transfer_surcharge: Weight, /// Weight per salt byte supplied to `seal_instantiate`. - pub instantiate_per_salt_byte: u64, + pub instantiate_per_salt_byte: Weight, /// Weight of calling `seal_hash_sha_256`. - pub hash_sha2_256: u64, + pub hash_sha2_256: Weight, /// Weight per byte hashed by `seal_hash_sha_256`. - pub hash_sha2_256_per_byte: u64, + pub hash_sha2_256_per_byte: Weight, /// Weight of calling `seal_hash_keccak_256`. - pub hash_keccak_256: u64, + pub hash_keccak_256: Weight, /// Weight per byte hashed by `seal_hash_keccak_256`. - pub hash_keccak_256_per_byte: u64, + pub hash_keccak_256_per_byte: Weight, /// Weight of calling `seal_hash_blake2_256`. - pub hash_blake2_256: u64, + pub hash_blake2_256: Weight, /// Weight per byte hashed by `seal_hash_blake2_256`. - pub hash_blake2_256_per_byte: u64, + pub hash_blake2_256_per_byte: Weight, /// Weight of calling `seal_hash_blake2_128`. - pub hash_blake2_128: u64, + pub hash_blake2_128: Weight, /// Weight per byte hashed by `seal_hash_blake2_128`. - pub hash_blake2_128_per_byte: u64, + pub hash_blake2_128_per_byte: Weight, /// Weight of calling `seal_ecdsa_recover`. - pub ecdsa_recover: u64, + pub ecdsa_recover: Weight, /// Weight of calling `seal_ecdsa_to_eth_address`. - pub ecdsa_to_eth_address: u64, + pub ecdsa_to_eth_address: Weight, /// The type parameter is used in the default implementation. #[codec(skip)] @@ -442,19 +435,19 @@ macro_rules! call_zero { macro_rules! 
cost_args { ($name:ident, $( $arg: expr ),+) => { - (T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+))).ref_time() + (T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+))) } } macro_rules! cost_batched_args { ($name:ident, $( $arg: expr ),+) => { - cost_args!($name, $( $arg ),+) / u64::from(API_BENCHMARK_BATCH_SIZE) + cost_args!($name, $( $arg ),+) / Weight::from(API_BENCHMARK_BATCH_SIZE) } } macro_rules! cost_instr_no_params_with_batch_size { ($name:ident, $batch_size:expr) => { - (cost_args!($name, 1) / u64::from($batch_size)) as u32 + (cost_args!($name, 1) / Weight::from($batch_size)) as u32 }; } @@ -532,8 +525,7 @@ impl Default for InstructionWeights { fn default() -> Self { let max_pages = Limits::default().memory_pages; Self { - version: 3, - fallback: 0, + version: 2, i64const: cost_instr!(instr_i64const, 1), i64load: cost_instr!(instr_i64load, 2), i64store: cost_instr!(instr_i64store, 2), @@ -667,15 +659,10 @@ impl Default for HostFnWeights { struct ScheduleRules<'a, T: Config> { schedule: &'a Schedule, params: Vec, - determinism: Determinism, } impl Schedule { - pub(crate) fn rules( - &self, - module: &elements::Module, - determinism: Determinism, - ) -> impl gas_metering::Rules + '_ { + pub(crate) fn rules(&self, module: &elements::Module) -> impl gas_metering::Rules + '_ { ScheduleRules { schedule: self, params: module @@ -687,7 +674,6 @@ impl Schedule { func.params().len() as u32 }) .collect(), - determinism, } } } @@ -770,10 +756,7 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { I32Rotr | I64Rotr => w.i64rotr, // Returning None makes the gas instrumentation fail which we intend for - // unsupported or unknown instructions. Offchain we might allow indeterminism and hence - // use the fallback weight for those instructions. - _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => - w.fallback, + // unsupported or unknown instructions. 
_ => return None, }; Some(weight) diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index c7644e696196f..01c809da8675e 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -34,51 +34,31 @@ use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; use sp_io::KillStorageResult; use sp_runtime::{ - traits::{Hash, Saturating, Zero}, + traits::{Hash, Zero}, RuntimeDebug, }; use sp_std::{marker::PhantomData, prelude::*}; +pub type ContractInfo = RawContractInfo, BalanceOf>; + /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -#[scale_info(skip_type_params(T))] -pub struct ContractInfo { +pub struct RawContractInfo { /// Unique ID for the subtree encoded as a bytes vector. pub trie_id: TrieId, /// The code associated with a given account. - pub code_hash: CodeHash, - /// How many bytes of storage are accumulated in this contract's child trie. - pub storage_bytes: u32, - /// How many items of storage are accumulated in this contract's child trie. - pub storage_items: u32, - /// This records to how much deposit the accumulated `storage_bytes` amount to. - pub storage_byte_deposit: BalanceOf, - /// This records to how much deposit the accumulated `storage_items` amount to. - pub storage_item_deposit: BalanceOf, - /// This records how much deposit is put down in order to pay for the contract itself. - /// - /// We need to store this information separately so it is not used when calculating any refunds - /// since the base deposit can only ever be refunded on contract termination. - pub storage_base_deposit: BalanceOf, + pub code_hash: CodeHash, + /// The amount of balance that is currently deposited to pay for consumed storage. 
+ pub storage_deposit: Balance, } -impl ContractInfo { +impl RawContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. #[cfg(test)] pub fn child_trie_info(&self) -> ChildInfo { child_trie_info(&self.trie_id[..]) } - - /// The deposit paying for the accumulated storage generated within the contract's child trie. - pub fn extra_deposit(&self) -> BalanceOf { - self.storage_byte_deposit.saturating_add(self.storage_item_deposit) - } - - /// Same as [`Self::extra_deposit`] but including the base deposit. - pub fn total_deposit(&self) -> BalanceOf { - self.extra_deposit().saturating_add(self.storage_base_deposit) - } } /// Associated child trie unique id is built from the hash part of the trie id. @@ -198,7 +178,7 @@ where }, (None, None) => (), } - storage_meter.charge(&diff); + storage_meter.charge(&diff)?; } match &new_value { @@ -220,21 +200,14 @@ where pub fn new_contract( account: &AccountIdOf, trie_id: TrieId, - code_hash: CodeHash, + ch: CodeHash, ) -> Result, DispatchError> { if >::contains_key(account) { return Err(Error::::DuplicateContract.into()) } - let contract = ContractInfo:: { - code_hash, - trie_id, - storage_bytes: 0, - storage_items: 0, - storage_byte_deposit: Zero::zero(), - storage_item_deposit: Zero::zero(), - storage_base_deposit: Zero::zero(), - }; + let contract = + ContractInfo:: { code_hash: ch, trie_id, storage_deposit: >::zero() }; Ok(contract) } @@ -254,10 +227,9 @@ where let base_weight = T::WeightInfo::on_process_deletion_queue_batch(); let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - T::WeightInfo::on_initialize_per_queue_item(0); - let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - - T::WeightInfo::on_initialize_per_trie_key(0)) - .ref_time(); - let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as u64); + let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0); + let 
decoding_weight = weight_per_queue_item.saturating_mul(queue_len as Weight); // `weight_per_key` being zero makes no sense and would constitute a failure to // benchmark properly. We opt for not removing any keys at all in this case. @@ -265,8 +237,7 @@ where .saturating_sub(base_weight) .saturating_sub(decoding_weight) .checked_div(weight_per_key) - .unwrap_or(Weight::zero()) - .ref_time() as u32; + .unwrap_or(0) as u32; (weight_per_key, key_budget) } @@ -277,7 +248,7 @@ where pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { - return Weight::zero() + return 0 } let (weight_per_key, mut remaining_key_budget) = @@ -311,10 +282,7 @@ where } >::put(queue); - let ref_time_weight = weight_limit - .ref_time() - .saturating_sub(weight_per_key.saturating_mul(u64::from(remaining_key_budget))); - Weight::from_ref_time(ref_time_weight) + weight_limit.saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as Weight)) } /// Generates a unique trie id by returning `hash(account_id ++ nonce)`. @@ -328,6 +296,7 @@ where } /// Returns the code hash of the contract specified by `account` ID. + #[cfg(test)] pub fn code_hash(account: &AccountIdOf) -> Option> { >::get(account).map(|i| i.code_hash) } @@ -339,7 +308,7 @@ where let queue: Vec = (0..T::DeletionQueueDepth::get()) .map(|_| DeletedContract { trie_id: TrieId::default() }) .collect(); - let bounded: BoundedVec<_, _> = queue.try_into().map_err(|_| ()).unwrap(); + let bounded: BoundedVec<_, _> = queue.try_into().unwrap(); >::put(bounded); } } diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs index 0a63eb42b86cb..b06f7ea4aedb5 100644 --- a/frame/contracts/src/storage/meter.rs +++ b/frame/contracts/src/storage/meter.rs @@ -17,24 +17,17 @@ //! This module contains functions to meter the storage deposit. 
-use crate::{storage::ContractInfo, BalanceOf, Config, Error, Inspect, Pallet}; +use crate::{storage::ContractInfo, BalanceOf, Config, Error}; use codec::Encode; use frame_support::{ dispatch::DispatchError, - ensure, - traits::{ - tokens::{BalanceStatus, WithdrawConsequence}, - Currency, ExistenceRequirement, Get, ReservableCurrency, - }, - DefaultNoBound, RuntimeDebugNoBound, + traits::{tokens::BalanceStatus, Currency, ExistenceRequirement, Get, ReservableCurrency}, + DefaultNoBound, }; use pallet_contracts_primitives::StorageDeposit as Deposit; use sp_core::crypto::UncheckedFrom; -use sp_runtime::{ - traits::{Saturating, Zero}, - FixedPointNumber, FixedU128, -}; -use sp_std::{marker::PhantomData, vec::Vec}; +use sp_runtime::traits::{Saturating, Zero}; +use sp_std::marker::PhantomData; /// Deposit that uses the native currency's balance type. pub type DepositOf = Deposit>; @@ -106,25 +99,22 @@ impl State for Root {} impl State for Nested {} /// A type that allows the metering of consumed or freed storage of a single contract call stack. -#[derive(DefaultNoBound, RuntimeDebugNoBound)] -pub struct RawMeter { +#[derive(DefaultNoBound)] +pub struct RawMeter, S: State> { /// The limit of how much balance this meter is allowed to consume. limit: BalanceOf, /// The amount of balance that was used in this meter and all of its already absorbed children. total_deposit: DepositOf, - /// The amount of storage changes that were recorded in this meter alone. - own_contribution: Contribution, - /// List of charges that should be applied at the end of a contract stack execution. - /// - /// We only have one charge per contract hence the size of this vector is - /// limited by the maximum call depth. - charges: Vec>, + /// The amount of balance that was used in this meter alone. + own_deposit: DepositOf, + /// Only when a contract was terminated we allow it to drop below the minimum balance. + terminated: bool, /// Type parameters are only used in impls. 
_phantom: PhantomData<(E, S)>, } /// This type is used to describe a storage change when charging from the meter. -#[derive(Default, RuntimeDebugNoBound)] +#[derive(Default)] pub struct Diff { /// How many bytes were added to storage. pub bytes_added: u32, @@ -134,120 +124,43 @@ pub struct Diff { pub items_added: u32, /// How many storage items were removed from storage. pub items_removed: u32, + /// If set to true the derived deposit will always a `Charge` larger than the + /// the existential deposit. + pub require_ed: bool, } impl Diff { - /// Calculate how much of a charge or refund results from applying the diff and store it - /// in the passed `info` if any. - /// - /// # Note - /// - /// In case `None` is passed for `info` only charges are calculated. This is because refunds - /// are calculated pro rata of the existing storage within a contract and hence need extract - /// this information from the passed `info`. - pub fn update_contract(&self, info: Option<&mut ContractInfo>) -> DepositOf { + /// Calculate how much of a charge or refund results from applying the diff. 
+ pub fn to_deposit(&self) -> DepositOf { + let mut deposit = Deposit::default(); let per_byte = T::DepositPerByte::get(); let per_item = T::DepositPerItem::get(); - let bytes_added = self.bytes_added.saturating_sub(self.bytes_removed); - let items_added = self.items_added.saturating_sub(self.items_removed); - let mut bytes_deposit = Deposit::Charge(per_byte.saturating_mul((bytes_added).into())); - let mut items_deposit = Deposit::Charge(per_item.saturating_mul((items_added).into())); - - // Without any contract info we can only calculate diffs which add storage - let info = if let Some(info) = info { - info - } else { - debug_assert_eq!(self.bytes_removed, 0); - debug_assert_eq!(self.items_removed, 0); - return bytes_deposit.saturating_add(&items_deposit) - }; - // Refunds are calculated pro rata based on the accumulated storage within the contract - let bytes_removed = self.bytes_removed.saturating_sub(self.bytes_added); - let items_removed = self.items_removed.saturating_sub(self.items_added); - let ratio = FixedU128::checked_from_rational(bytes_removed, info.storage_bytes) - .unwrap_or_default() - .min(FixedU128::from_u32(1)); - bytes_deposit = bytes_deposit - .saturating_add(&Deposit::Refund(ratio.saturating_mul_int(info.storage_byte_deposit))); - let ratio = FixedU128::checked_from_rational(items_removed, info.storage_items) - .unwrap_or_default() - .min(FixedU128::from_u32(1)); - items_deposit = items_deposit - .saturating_add(&Deposit::Refund(ratio.saturating_mul_int(info.storage_item_deposit))); - - // We need to update the contract info structure with the new deposits - info.storage_bytes = - info.storage_bytes.saturating_add(bytes_added).saturating_sub(bytes_removed); - info.storage_items = - info.storage_items.saturating_add(items_added).saturating_sub(items_removed); - match &bytes_deposit { - Deposit::Charge(amount) => - info.storage_byte_deposit = info.storage_byte_deposit.saturating_add(*amount), - Deposit::Refund(amount) => - 
info.storage_byte_deposit = info.storage_byte_deposit.saturating_sub(*amount), - } - match &items_deposit { - Deposit::Charge(amount) => - info.storage_item_deposit = info.storage_item_deposit.saturating_add(*amount), - Deposit::Refund(amount) => - info.storage_item_deposit = info.storage_item_deposit.saturating_sub(*amount), + if self.bytes_added > self.bytes_removed { + deposit = deposit.saturating_add(&Deposit::Charge( + per_byte.saturating_mul((self.bytes_added - self.bytes_removed).into()), + )); + } else if self.bytes_removed > self.bytes_added { + deposit = deposit.saturating_add(&Deposit::Refund( + per_byte.saturating_mul((self.bytes_removed - self.bytes_added).into()), + )); } - bytes_deposit.saturating_add(&items_deposit) - } -} - -impl Diff { - fn saturating_add(&self, rhs: &Self) -> Self { - Self { - bytes_added: self.bytes_added.saturating_add(rhs.bytes_added), - bytes_removed: self.bytes_removed.saturating_add(rhs.bytes_removed), - items_added: self.items_added.saturating_add(rhs.items_added), - items_removed: self.items_removed.saturating_add(rhs.items_removed), + if self.items_added > self.items_removed { + deposit = deposit.saturating_add(&Deposit::Charge( + per_item.saturating_mul((self.items_added - self.items_removed).into()), + )); + } else if self.items_removed > self.items_added { + deposit = deposit.saturating_add(&Deposit::Refund( + per_item.saturating_mul((self.items_removed - self.items_added).into()), + )); } - } -} - -/// Records information to charge or refund a plain account. -/// -/// All the charges are deferred to the end of a whole call stack. Reason is that by doing -/// this we can do all the refunds before doing any charge. This way a plain account can use -/// more deposit than it has balance as along as it is covered by a refund. This -/// essentially makes the order of storage changes irrelevant with regard to the deposit system. 
-#[derive(RuntimeDebugNoBound, Clone)] -struct Charge { - contract: T::AccountId, - amount: DepositOf, - terminated: bool, -} -/// Records the storage changes of a storage meter. -#[derive(RuntimeDebugNoBound)] -enum Contribution { - /// The contract the meter belongs to is alive and accumulates changes using a [`Diff`]. - Alive(Diff), - /// The meter was checked against its limit using [`RawMeter::enforce_limit`] at the end of - /// its execution. In this process the [`Diff`] was converted into a [`Deposit`]. - Checked(DepositOf), - /// The contract was terminated. In this process the [`Diff`] was converted into a [`Deposit`] - /// in order to calculate the refund. - Terminated(DepositOf), -} - -impl Contribution { - /// See [`Diff::update_contract`]. - fn update_contract(&self, info: Option<&mut ContractInfo>) -> DepositOf { - match self { - Self::Alive(diff) => diff.update_contract::(info), - Self::Terminated(deposit) | Self::Checked(deposit) => deposit.clone(), + if self.require_ed { + deposit = deposit.max(Deposit::Charge(T::Currency::minimum_balance())) } - } -} -impl Default for Contribution { - fn default() -> Self { - Self::Alive(Default::default()) + deposit } } @@ -265,7 +178,6 @@ where /// usage for this sub call separately. This is necessary because we want to exchange balance /// with the current contract we are interacting with. pub fn nested(&self) -> RawMeter { - debug_assert!(self.is_alive()); RawMeter { limit: self.available(), ..Default::default() } } @@ -280,28 +192,45 @@ where /// /// # Parameters /// - /// - `absorbed`: The child storage meter that should be absorbed. - /// - `origin`: The origin that spawned the original root meter. - /// - `contract`: The contract that this sub call belongs to. - /// - `info`: The info of the contract in question. `None` if the contract was terminated. + /// `absorbed`: The child storage meter that should be absorbed. + /// `origin`: The origin that spawned the original root meter. 
+ /// `contract`: The contract that this sub call belongs to. + /// `info`: The info of the contract in question. `None` if the contract was terminated. pub fn absorb( &mut self, - absorbed: RawMeter, + mut absorbed: RawMeter, + origin: &T::AccountId, contract: &T::AccountId, info: Option<&mut ContractInfo>, ) { - let own_deposit = absorbed.own_contribution.update_contract(info); - self.total_deposit = self - .total_deposit - .saturating_add(&absorbed.total_deposit) - .saturating_add(&own_deposit); - if !own_deposit.is_zero() { - self.charges.extend_from_slice(&absorbed.charges); - self.charges.push(Charge { - contract: contract.clone(), - amount: own_deposit, - terminated: absorbed.is_terminated(), - }); + // Absorbing from an existing (non terminated) contract. + if let Some(info) = info { + match &mut absorbed.own_deposit { + Deposit::Charge(amount) => + info.storage_deposit = info.storage_deposit.saturating_add(*amount), + Deposit::Refund(amount) => { + // We need to make sure to never refund more than what was deposited and + // still leave the existential deposit inside the contract's account. + // This case can happen when costs change due to a runtime upgrade where + // increased costs could remove an account due to refunds. 
+ let amount = { + let corrected_amount = (*amount).min( + info.storage_deposit.saturating_sub(T::Currency::minimum_balance()), + ); + let correction = (*amount).saturating_sub(corrected_amount); + absorbed.total_deposit = + absorbed.total_deposit.saturating_sub(&Deposit::Refund(correction)); + *amount = corrected_amount; + corrected_amount + }; + info.storage_deposit = info.storage_deposit.saturating_sub(amount); + }, + } + } + + self.total_deposit = self.total_deposit.saturating_add(&absorbed.total_deposit); + if !absorbed.own_deposit.is_zero() { + E::charge(origin, contract, &absorbed.own_deposit, absorbed.terminated); } } @@ -309,16 +238,6 @@ where fn available(&self) -> BalanceOf { self.total_deposit.available(&self.limit) } - - /// True if the contract is alive. - fn is_alive(&self) -> bool { - matches!(self.own_contribution, Contribution::Alive(_)) - } - - /// True if the contract is terminated. - fn is_terminated(&self) -> bool { - matches!(self.own_contribution, Contribution::Terminated(_)) - } } /// Functions that only apply to the root state. @@ -341,18 +260,11 @@ where } /// The total amount of deposit that should change hands as result of the execution - /// that this meter was passed into. This will also perform all the charges accumulated - /// in the whole contract stack. + /// that this meter was passed into. /// /// This drops the root meter in order to make sure it is only called when the whole /// execution did finish. 
- pub fn into_deposit(self, origin: &T::AccountId) -> DepositOf { - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { - E::charge(origin, &charge.contract, &charge.amount, charge.terminated); - } - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { - E::charge(origin, &charge.contract, &charge.amount, charge.terminated); - } + pub fn into_deposit(self) -> DepositOf { self.total_deposit } } @@ -365,12 +277,18 @@ where E: Ext, { /// Try to charge the `diff` from the meter. Fails if this would exceed the original limit. - pub fn charge(&mut self, diff: &Diff) { - debug_assert!(self.is_alive()); - match &mut self.own_contribution { - Contribution::Alive(own) => *own = own.saturating_add(diff), - _ => panic!("Charge is never called after termination; qed"), - }; + pub fn charge(&mut self, diff: &Diff) -> Result, DispatchError> { + debug_assert!(!self.terminated); + let deposit = diff.to_deposit::(); + let total_deposit = self.total_deposit.saturating_add(&deposit); + if let Deposit::Charge(amount) = total_deposit { + if amount > self.limit { + return Err(>::StorageDepositLimitExhausted.into()) + } + } + self.total_deposit = total_deposit; + self.own_deposit = self.own_deposit.saturating_add(&deposit); + Ok(deposit) } /// Charge from `origin` a storage deposit for contract instantiation. @@ -382,22 +300,25 @@ where contract: &T::AccountId, info: &mut ContractInfo, ) -> Result, DispatchError> { - debug_assert!(self.is_alive()); - let mut deposit = - Diff { bytes_added: info.encoded_size() as u32, items_added: 1, ..Default::default() } - .update_contract::(None); - - // Instantiate needs to transfer the minimum balance at least in order to pull the - // contract's account into existence. 
- deposit = deposit.max(Deposit::Charge(Pallet::::min_balance())); - if deposit.charge_or_zero() > self.limit { - return Err(>::StorageDepositLimitExhausted.into()) + debug_assert!(!self.terminated); + let deposit = Diff { + bytes_added: info.encoded_size() as u32, + items_added: 1, + require_ed: true, + ..Default::default() } - - // We do not increase `own_contribution` because this will be charged later when the - // contract execution does conclude and hence would lead to a double charge. - self.total_deposit = deposit.clone(); - info.storage_base_deposit = deposit.charge_or_zero(); + .to_deposit::(); + debug_assert!(matches!(deposit, Deposit::Charge(_))); + // We do not increase `own_deposit` because this will be charged later when the contract + // execution does conclude. + let total_deposit = self.total_deposit.saturating_add(&deposit); + if let Deposit::Charge(amount) = &total_deposit { + if amount > &self.limit { + return Err(>::StorageDepositLimitExhausted.into()) + } + } + info.storage_deposit = info.storage_deposit.saturating_add(deposit.charge_or_zero()); + self.total_deposit = total_deposit; if !deposit.is_zero() { // We need to charge immediately so that the account is created before the `value` // is transferred from the caller to the contract. @@ -410,55 +331,34 @@ where /// /// This will manipulate the meter so that all storage deposit accumulated in /// `contract_info` will be refunded to the `origin` of the meter. - pub fn terminate(&mut self, info: &ContractInfo) { - debug_assert!(self.is_alive()); - self.own_contribution = Contribution::Terminated(Deposit::Refund(info.total_deposit())); - } - - /// [`Self::charge`] does not enforce the storage limit since we want to do this check as late - /// as possible to allow later refunds to offset earlier charges. - /// - /// # Note - /// - /// We only need to call this **once** for every call stack and not for every cross contract - /// call. Hence this is only called when the last call frame returns. 
- pub fn enforce_limit( - &mut self, - info: Option<&mut ContractInfo>, - ) -> Result<(), DispatchError> { - let deposit = self.own_contribution.update_contract(info); - let total_deposit = self.total_deposit.saturating_add(&deposit); - // We don't want to override a `Terminated` with a `Checked`. - if self.is_alive() { - self.own_contribution = Contribution::Checked(deposit); - } - if let Deposit::Charge(amount) = total_deposit { - if amount > self.limit { - return Err(>::StorageDepositLimitExhausted.into()) - } - } - Ok(()) + pub fn terminate(&mut self, contract_info: &ContractInfo) { + debug_assert!(!self.terminated); + let refund = Deposit::Refund(contract_info.storage_deposit); + + // The deposit for `own_deposit` isn't persisted into the contract info until the current + // frame is dropped. This means that whatever changes were introduced during the + // current frame are dicarded when terminating. + self.total_deposit = + self.total_deposit.saturating_add(&refund).saturating_sub(&self.own_deposit); + self.own_deposit = refund; + self.terminated = true; } } -impl Ext for ReservingExt -where - T: Config, - T::AccountId: UncheckedFrom + AsRef<[u8]>, -{ +impl Ext for ReservingExt { fn check_limit( origin: &T::AccountId, limit: Option>, min_leftover: BalanceOf, ) -> Result, DispatchError> { - let max = T::Currency::reducible_balance(origin, true).saturating_sub(min_leftover); - let limit = limit.unwrap_or(max); - ensure!( - limit <= max && - matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), - >::StorageDepositNotEnoughFunds, - ); - Ok(limit) + let max = T::Currency::free_balance(origin) + .saturating_sub(T::Currency::minimum_balance()) + .saturating_sub(min_leftover); + match limit { + Some(limit) if limit <= max => Ok(limit), + None => Ok(max), + _ => Err(>::StorageDepositNotEnoughFunds.into()), + } } fn charge( @@ -493,9 +393,6 @@ where "Failed to transfer storage deposit {:?} from origin {:?} to contract {:?}: {:?}", amount, 
origin, contract, err, ); - if cfg!(debug_assertions) { - panic!("Unable to collect storage deposit. This is a bug."); - } } }, // For `Refund(_)` no error happen because the initial value transfer from the @@ -514,7 +411,7 @@ where // refund because we consider this unexpected behaviour. *amount.min( &T::Currency::reserved_balance(contract) - .saturating_sub(Pallet::::min_balance()), + .saturating_sub(T::Currency::minimum_balance()), ) }; let result = @@ -525,9 +422,6 @@ where "Failed to repatriate storage deposit {:?} from contract {:?} to origin {:?}: {:?}", amount, contract, origin, result, ); - if cfg!(debug_assertions) { - panic!("Unable to refund storage deposit. This is a bug."); - } } }, }; @@ -547,23 +441,23 @@ mod tests { exec::AccountIdOf, tests::{Test, ALICE, BOB, CHARLIE}, }; - use frame_support::parameter_types; use pretty_assertions::assert_eq; + use std::cell::RefCell; type TestMeter = RawMeter; - parameter_types! { - static TestExtTestValue: TestExt = Default::default(); + thread_local! 
{ + static TEST_EXT: RefCell = RefCell::new(Default::default()); } - #[derive(Debug, PartialEq, Eq, Clone)] + #[derive(Debug, PartialEq, Eq)] struct LimitCheck { origin: AccountIdOf, limit: BalanceOf, min_leftover: BalanceOf, } - #[derive(Debug, PartialEq, Eq, Clone)] + #[derive(Debug, PartialEq, Eq)] struct Charge { origin: AccountIdOf, contract: AccountIdOf, @@ -571,8 +465,8 @@ mod tests { terminated: bool, } - #[derive(Default, Debug, PartialEq, Eq, Clone)] - pub struct TestExt { + #[derive(Default, Debug, PartialEq, Eq)] + struct TestExt { limit_checks: Vec, charges: Vec, } @@ -591,9 +485,12 @@ mod tests { min_leftover: BalanceOf, ) -> Result, DispatchError> { let limit = limit.unwrap_or(42); - TestExtTestValue::mutate(|ext| { - ext.limit_checks - .push(LimitCheck { origin: origin.clone(), limit, min_leftover }) + TEST_EXT.with(|ext| { + ext.borrow_mut().limit_checks.push(LimitCheck { + origin: origin.clone(), + limit, + min_leftover, + }) }); Ok(limit) } @@ -604,8 +501,8 @@ mod tests { amount: &DepositOf, terminated: bool, ) { - TestExtTestValue::mutate(|ext| { - ext.charges.push(Charge { + TEST_EXT.with(|ext| { + ext.borrow_mut().charges.push(Charge { origin: origin.clone(), contract: contract.clone(), amount: amount.clone(), @@ -616,29 +513,17 @@ mod tests { } fn clear_ext() { - TestExtTestValue::mutate(|ext| ext.clear()) + TEST_EXT.with(|ext| ext.borrow_mut().clear()) } - #[derive(Default)] - struct StorageInfo { - bytes: u32, - items: u32, - bytes_deposit: BalanceOf, - items_deposit: BalanceOf, - } - - fn new_info(info: StorageInfo) -> ContractInfo { + fn new_info(deposit: BalanceOf) -> ContractInfo { use crate::storage::Storage; use sp_runtime::traits::Hash; ContractInfo:: { trie_id: >::generate_trie_id(&ALICE, 42), code_hash: ::Hashing::hash(b"42"), - storage_bytes: info.bytes, - storage_items: info.items, - storage_byte_deposit: info.bytes_deposit, - storage_item_deposit: info.items_deposit, - storage_base_deposit: Default::default(), + storage_deposit: 
deposit, } } @@ -648,13 +533,15 @@ mod tests { TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - ..Default::default() - } - ) + TEST_EXT.with(|ext| { + assert_eq!( + *ext.borrow(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + ..Default::default() + } + ) + }); } #[test] @@ -666,83 +553,119 @@ mod tests { // an empty charge does not create a `Charge` entry let mut nested0 = meter.nested(); - nested0.charge(&Default::default()); - meter.absorb(nested0, &BOB, None); - - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - ..Default::default() - } - ) + nested0.charge(&Default::default()).unwrap(); + meter.absorb(nested0, &ALICE, &BOB, None); + + TEST_EXT.with(|ext| { + assert_eq!( + *ext.borrow(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + ..Default::default() + } + ) + }); } #[test] - fn charging_works() { + fn existential_deposit_works() { clear_ext(); - let mut meter = TestMeter::new(&ALICE, Some(100), 0).unwrap(); - assert_eq!(meter.available(), 100); + let mut meter = TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); + assert_eq!(meter.available(), 1_000); - let mut nested0_info = - new_info(StorageInfo { bytes: 100, items: 5, bytes_deposit: 100, items_deposit: 10 }); + // a `Refund` will be turned into a `Charge(ed)` which is intended behaviour let mut nested0 = meter.nested(); - nested0.charge(&Diff { - bytes_added: 108, - bytes_removed: 5, - items_added: 1, - items_removed: 2, + nested0.charge(&Diff { require_ed: true, ..Default::default() }).unwrap(); + nested0 + .charge(&Diff { bytes_removed: 1, require_ed: true, ..Default::default() }) + .unwrap(); + meter.absorb(nested0, &ALICE, &BOB, None); + + TEST_EXT.with(|ext| { + assert_eq!( + 
*ext.borrow(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + charges: vec![Charge { + origin: ALICE, + contract: BOB, + amount: Deposit::Charge(::Currency::minimum_balance() * 2), + terminated: false, + }] + } + ) }); - nested0.charge(&Diff { bytes_removed: 99, ..Default::default() }); + } - let mut nested1_info = - new_info(StorageInfo { bytes: 100, items: 10, bytes_deposit: 100, items_deposit: 20 }); - let mut nested1 = nested0.nested(); - nested1.charge(&Diff { items_removed: 5, ..Default::default() }); - nested0.absorb(nested1, &CHARLIE, Some(&mut nested1_info)); + #[test] + fn charging_works() { + clear_ext(); - let mut nested2_info = - new_info(StorageInfo { bytes: 100, items: 7, bytes_deposit: 100, items_deposit: 20 }); - let mut nested2 = nested0.nested(); - nested2.charge(&Diff { items_removed: 7, ..Default::default() }); - nested0.absorb(nested2, &CHARLIE, Some(&mut nested2_info)); + let min_balance = ::Currency::minimum_balance(); - nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); - meter.absorb(nested0, &BOB, Some(&mut nested0_info)); + let mut meter = TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); + assert_eq!(meter.available(), 1_000); - meter.into_deposit(&ALICE); + let mut nested0_info = new_info(100); + let mut nested0 = meter.nested(); + nested0 + .charge(&Diff { + bytes_added: 10, + bytes_removed: 5, + items_added: 1, + items_removed: 2, + ..Default::default() + }) + .unwrap(); + nested0.charge(&Diff { bytes_removed: 1, ..Default::default() }).unwrap(); - assert_eq!(nested0_info.extra_deposit(), 112); - assert_eq!(nested1_info.extra_deposit(), 110); - assert_eq!(nested2_info.extra_deposit(), 100); + let mut nested1_info = new_info(50); + let mut nested1 = nested0.nested(); + nested1.charge(&Diff { items_removed: 5, ..Default::default() }).unwrap(); + nested0.absorb(nested1, &ALICE, &CHARLIE, Some(&mut nested1_info)); - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: 
vec![LimitCheck { origin: ALICE, limit: 100, min_leftover: 0 }], - charges: vec![ - Charge { - origin: ALICE, - contract: CHARLIE, - amount: Deposit::Refund(10), - terminated: false - }, - Charge { - origin: ALICE, - contract: CHARLIE, - amount: Deposit::Refund(20), - terminated: false - }, - Charge { - origin: ALICE, - contract: BOB, - amount: Deposit::Charge(2), - terminated: false - } - ] - } - ) + // Trying to refund more than is available in the contract will cap the charge + // to (deposit_in_contract - ed). + let mut nested2_info = new_info(5); + let mut nested2 = nested0.nested(); + nested2.charge(&Diff { bytes_removed: 7, ..Default::default() }).unwrap(); + nested0.absorb(nested2, &ALICE, &CHARLIE, Some(&mut nested2_info)); + + meter.absorb(nested0, &ALICE, &BOB, Some(&mut nested0_info)); + + assert_eq!(nested0_info.storage_deposit, 102); + assert_eq!(nested1_info.storage_deposit, 40); + assert_eq!(nested2_info.storage_deposit, min_balance); + + TEST_EXT.with(|ext| { + assert_eq!( + *ext.borrow(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + charges: vec![ + Charge { + origin: ALICE, + contract: CHARLIE, + amount: Deposit::Refund(10), + terminated: false + }, + Charge { + origin: ALICE, + contract: CHARLIE, + amount: Deposit::Refund(4), + terminated: false + }, + Charge { + origin: ALICE, + contract: BOB, + amount: Deposit::Charge(2), + terminated: false + } + ] + } + ) + }); } #[test] @@ -753,45 +676,48 @@ mod tests { assert_eq!(meter.available(), 1_000); let mut nested0 = meter.nested(); - nested0.charge(&Diff { - bytes_added: 5, - bytes_removed: 1, - items_added: 3, - items_removed: 1, - }); - nested0.charge(&Diff { items_added: 2, ..Default::default() }); + nested0 + .charge(&Diff { + bytes_added: 5, + bytes_removed: 1, + items_added: 3, + items_removed: 1, + ..Default::default() + }) + .unwrap(); + nested0.charge(&Diff { items_added: 2, ..Default::default() }).unwrap(); - let mut nested1_info = - 
new_info(StorageInfo { bytes: 100, items: 10, bytes_deposit: 100, items_deposit: 20 }); + let nested1_info = new_info(400); let mut nested1 = nested0.nested(); - nested1.charge(&Diff { items_removed: 5, ..Default::default() }); - nested1.charge(&Diff { bytes_added: 20, ..Default::default() }); + nested1.charge(&Diff { items_removed: 5, ..Default::default() }).unwrap(); + nested1.charge(&Diff { bytes_added: 20, ..Default::default() }).unwrap(); nested1.terminate(&nested1_info); - nested0.enforce_limit(Some(&mut nested1_info)).unwrap(); - nested0.absorb(nested1, &CHARLIE, None); - - meter.absorb(nested0, &BOB, None); - meter.into_deposit(&ALICE); - - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - charges: vec![ - Charge { - origin: ALICE, - contract: CHARLIE, - amount: Deposit::Refund(120), - terminated: true - }, - Charge { - origin: ALICE, - contract: BOB, - amount: Deposit::Charge(12), - terminated: false - } - ] - } - ) + nested0.absorb(nested1, &ALICE, &CHARLIE, None); + + meter.absorb(nested0, &ALICE, &BOB, None); + drop(meter); + + TEST_EXT.with(|ext| { + assert_eq!( + *ext.borrow(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + charges: vec![ + Charge { + origin: ALICE, + contract: CHARLIE, + amount: Deposit::Refund(400), + terminated: true + }, + Charge { + origin: ALICE, + contract: BOB, + amount: Deposit::Charge(12), + terminated: false + } + ] + } + ) + }); } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index bc2ee31681d7f..0febfec929b6e 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use self::test_utils::hash; use crate::{ chain_extension::{ ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, @@ -23,27 +22,27 @@ use crate::{ }, exec::{FixSizedKey, Frame}, storage::Storage, - tests::test_utils::{get_contract, get_contract_checked}, - wasm::{Determinism, PrefabWasmModule, ReturnCode as RuntimeReturnCode}, + wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, DeletionQueue, - Error, Pallet, Schedule, + BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, + DefaultContractAccessWeight, DeletionQueue, Error, Pallet, Schedule, }; use assert_matches::assert_matches; use codec::Encode; use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_noop, assert_ok, - dispatch::{DispatchClass, DispatchErrorWithPostInfo, PostDispatchInfo}, + dispatch::DispatchErrorWithPostInfo, parameter_types, storage::child, traits::{ - BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, LockableCurrency, OnIdle, - OnInitialize, ReservableCurrency, WithdrawReasons, + BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, OnIdle, OnInitialize, + ReservableCurrency, }, - weights::{constants::WEIGHT_PER_SECOND, Weight}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, PostDispatchInfo, Weight}, }; use frame_system::{self as system, EventRecord, Phase}; use pretty_assertions::{assert_eq, assert_ne}; +use sp_core::Bytes; use sp_io::hashing::blake2_256; use sp_keystore::{testing::KeyStore, KeystoreExt}; use sp_runtime::{ @@ -51,7 +50,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Convert, Hash, IdentityLookup}, AccountId32, }; -use std::sync::Arc; +use std::{cell::RefCell, sync::Arc}; use crate as pallet_contracts; @@ -75,11 +74,8 @@ frame_support::construct_runtime!( #[macro_use] pub mod test_utils { - use super::{Balances, Hash, SysConfig, Test}; - use crate::{ - exec::AccountIdOf, 
storage::Storage, CodeHash, Config, ContractInfo, ContractInfoOf, Nonce, - }; - use codec::Encode; + use super::{Balances, Test}; + use crate::{exec::AccountIdOf, storage::Storage, CodeHash, Config, ContractInfoOf, Nonce}; use frame_support::traits::Currency; pub fn place_contract(address: &AccountIdOf, code_hash: CodeHash) { @@ -99,21 +95,11 @@ pub mod test_utils { pub fn get_balance(who: &AccountIdOf) -> u64 { Balances::free_balance(who) } - pub fn get_contract(addr: &AccountIdOf) -> ContractInfo { - get_contract_checked(addr).unwrap() - } - pub fn get_contract_checked(addr: &AccountIdOf) -> Option> { - ContractInfoOf::::get(addr) - } - pub fn hash(s: &S) -> <::Hashing as Hash>::Output { - <::Hashing as Hash>::hash_of(s) - } macro_rules! assert_return_code { ( $x:expr , $y:expr $(,)? ) => {{ assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); }}; } - macro_rules! assert_refcount { ( $code_hash:expr , $should:expr $(,)? ) => {{ let is = crate::OwnerInfoOf::::get($code_hash).map(|m| m.refcount()).unwrap(); @@ -122,11 +108,10 @@ pub mod test_utils { } } -parameter_types! { - static TestExtensionTestValue: TestExtension = Default::default(); +thread_local! 
{ + static TEST_EXTENSION: RefCell = Default::default(); } -#[derive(Clone)] pub struct TestExtension { enabled: bool, last_seen_buffer: Vec, @@ -146,15 +131,15 @@ pub struct TempStorageExtension { impl TestExtension { fn disable() { - TestExtensionTestValue::mutate(|e| e.enabled = false) + TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) } fn last_seen_buffer() -> Vec { - TestExtensionTestValue::get().last_seen_buffer.clone() + TEST_EXTENSION.with(|e| e.borrow().last_seen_buffer.clone()) } fn last_seen_inputs() -> (u32, u32, u32, u32) { - TestExtensionTestValue::get().last_seen_inputs + TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs.clone()) } } @@ -177,19 +162,20 @@ impl ChainExtension for TestExtension { let mut env = env.buf_in_buf_out(); let input = env.read(8)?; env.write(&input, false, None)?; - TestExtensionTestValue::mutate(|e| e.last_seen_buffer = input); + TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); Ok(RetVal::Converging(id)) }, 1 => { let env = env.only_in(); - TestExtensionTestValue::mutate(|e| { - e.last_seen_inputs = (env.val0(), env.val1(), env.val2(), env.val3()) + TEST_EXTENSION.with(|e| { + e.borrow_mut().last_seen_inputs = + (env.val0(), env.val1(), env.val2(), env.val3()) }); Ok(RetVal::Converging(id)) }, 2 => { let mut env = env.buf_in_buf_out(); - let weight = Weight::from_ref_time(env.read(5)?[4].into()); + let weight = env.read(5)?[4].into(); env.charge_weight(weight)?; Ok(RetVal::Converging(id)) }, @@ -201,7 +187,7 @@ impl ChainExtension for TestExtension { } fn enabled() -> bool { - TestExtensionTestValue::get().enabled + TEST_EXTENSION.with(|e| e.borrow().enabled) } } @@ -219,7 +205,7 @@ impl ChainExtension for RevertingExtension { } fn enabled() -> bool { - TestExtensionTestValue::get().enabled + TEST_EXTENSION.with(|e| e.borrow().enabled) } } @@ -268,7 +254,7 @@ impl ChainExtension for TempStorageExtension { } fn enabled() -> bool { - TestExtensionTestValue::get().enabled + TEST_EXTENSION.with(|e| 
e.borrow().enabled) } } @@ -278,9 +264,7 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); pub static ExistentialDeposit: u64 = 1; } impl frame_system::Config for Test { @@ -288,16 +272,16 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = AccountId32; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -315,7 +299,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -329,8 +313,8 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_utility::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type PalletsOrigin = OriginCaller; type WeightInfo = (); } @@ -340,7 +324,6 @@ parameter_types! { // We want stack height to be always enabled for tests so that this // instrumentation path is always tested implicitly. schedule.limits.stack_height = Some(512); - schedule.instruction_weights.fallback = 1; schedule }; pub static DepositPerByte: BalanceOf = 1; @@ -349,50 +332,35 @@ parameter_types! 
{ impl Convert> for Test { fn convert(w: Weight) -> BalanceOf { - w.ref_time() + w } } /// A filter whose filter function can be swapped at runtime. pub struct TestFilter; -#[derive(Clone)] -pub struct Filters { - filter: fn(&RuntimeCall) -> bool, -} - -impl Default for Filters { - fn default() -> Self { - Filters { filter: (|_| true) } - } -} - -parameter_types! { - static CallFilter: Filters = Default::default(); +thread_local! { + static CALL_FILTER: RefCell bool> = RefCell::new(|_| true); } impl TestFilter { - pub fn set_filter(filter: fn(&RuntimeCall) -> bool) { - CallFilter::mutate(|fltr| fltr.filter = filter); + pub fn set_filter(filter: fn(&Call) -> bool) { + CALL_FILTER.with(|fltr| *fltr.borrow_mut() = filter); } } -impl Contains for TestFilter { - fn contains(call: &RuntimeCall) -> bool { - (CallFilter::get().filter)(call) +impl Contains for TestFilter { + fn contains(call: &Call) -> bool { + CALL_FILTER.with(|fltr| fltr.borrow()(call)) } } -parameter_types! { - pub const DeletionWeightLimit: Weight = Weight::from_ref_time(500_000_000_000); -} - impl Config for Test { type Time = Timestamp; type Randomness = Randomness; type Currency = Balances; - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type CallFilter = TestFilter; type CallStack = [Frame; 31]; type WeightPrice = Self; @@ -400,12 +368,14 @@ impl Config for Test { type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type DeletionQueueDepth = ConstU32<1024>; - type DeletionWeightLimit = DeletionWeightLimit; + type DeletionWeightLimit = ConstU64<500_000_000_000>; type Schedule = MySchedule; type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; type AddressGenerator = DefaultAddressGenerator; + type ContractAccessWeight = DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; + type RelaxedMaxCodeLen = ConstU32<{ 256 * 1024 }>; type MaxStorageKeyLen = 
ConstU32<128>; } @@ -414,7 +384,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(256 * 1024); +pub const GAS_LIMIT: Weight = 100_000_000_000; pub struct ExtBuilder { existential_deposit: u64, @@ -500,7 +470,7 @@ fn calling_plain_account_fails() { let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), + Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), Err(DispatchErrorWithPostInfo { error: Error::::ContractNotFound.into(), post_info: PostDispatchInfo { @@ -523,19 +493,14 @@ fn instantiate_and_call_and_deposit_event() { // We determine the storage deposit limit after uploading because it depends on ALICEs free // balance which is changed by uploading a module. 
- assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - None, - Determinism::Deterministic - )); + assert_ok!(Contracts::upload_code(Origin::signed(ALICE), wasm, None)); // Drop previous events initialize_block(2); // Check at the end to get hash on error easily assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), value, GAS_LIMIT, None, @@ -551,14 +516,12 @@ fn instantiate_and_call_and_deposit_event() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: addr.clone() - }), + event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + event: Event::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance, }), @@ -566,7 +529,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance, @@ -575,7 +538,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: min_balance, }), @@ -583,7 +546,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: value, @@ -592,7 +555,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::ContractEmitted { + event: 
Event::Contracts(crate::Event::ContractEmitted { contract: addr.clone(), data: vec![1, 2, 3, 4] }), @@ -600,11 +563,11 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { + event: Event::Contracts(crate::Event::Instantiated { deployer: ALICE, contract: addr.clone() }), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, ] ); @@ -619,7 +582,7 @@ fn deposit_event_max_value_limit() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -631,10 +594,10 @@ fn deposit_event_max_value_limit() { // Call contract with allowed storage value. assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, + GAS_LIMIT * 2, // we are copying a huge buffer, None, ::Schedule::get().limits.payload_len.encode(), )); @@ -642,7 +605,7 @@ fn deposit_event_max_value_limit() { // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr, 0, GAS_LIMIT, @@ -662,7 +625,7 @@ fn run_out_of_gas() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100 * min_balance, GAS_LIMIT, None, @@ -676,10 +639,10 @@ fn run_out_of_gas() { // loops forever. 
assert_err_ignore_postinfo!( Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr, // newly created account 0, - Weight::from_ref_time(1_000_000_000_000).set_proof_size(u64::MAX), + 1_000_000_000_000, None, vec![], ), @@ -696,18 +659,12 @@ fn instantiate_unique_trie_id() { ExtBuilder::default().existential_deposit(500).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - None, - Determinism::Deterministic, - ) - .unwrap(); + Contracts::upload_code(Origin::signed(ALICE), wasm, None).unwrap(); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Instantiate the contract and store its trie id for later comparison. assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -715,12 +672,12 @@ fn instantiate_unique_trie_id() { vec![], vec![], )); - let trie_id = get_contract(&addr).trie_id; + let trie_id = ContractInfoOf::::get(&addr).unwrap().trie_id; // Try to instantiate it again without termination should yield an error. assert_err_ignore_postinfo!( Contracts::instantiate( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -733,7 +690,7 @@ fn instantiate_unique_trie_id() { // Terminate the contract. assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -743,7 +700,7 @@ fn instantiate_unique_trie_id() { // Re-Instantiate after termination. 
assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -753,7 +710,7 @@ fn instantiate_unique_trie_id() { )); // Trie ids shouldn't match or we might have a collision - assert_ne!(trie_id, get_contract(&addr).trie_id); + assert_ne!(trie_id, ContractInfoOf::::get(&addr).unwrap().trie_id); }); } @@ -765,7 +722,7 @@ fn storage_max_value_limit() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -774,14 +731,14 @@ fn storage_max_value_limit() { vec![], )); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - get_contract(&addr); + ContractInfoOf::::get(&addr).unwrap(); // Call contract with allowed storage value. assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer + GAS_LIMIT * 2, // we are copying a huge buffer None, ::Schedule::get().limits.payload_len.encode(), )); @@ -789,7 +746,7 @@ fn storage_max_value_limit() { // Call contract with too large a storage value. 
assert_err_ignore_postinfo!( Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr, 0, GAS_LIMIT, @@ -814,7 +771,7 @@ fn deploy_and_call_other_contract() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -823,7 +780,7 @@ fn deploy_and_call_other_contract() { vec![], )); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -838,7 +795,7 @@ fn deploy_and_call_other_contract() { // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), caller_addr.clone(), 0, GAS_LIMIT, @@ -851,14 +808,14 @@ fn deploy_and_call_other_contract() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { + event: Event::System(frame_system::Event::NewAccount { account: callee_addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + event: Event::Balances(pallet_balances::Event::Endowed { account: callee_addr.clone(), free_balance: min_balance, }), @@ -866,7 +823,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: callee_addr.clone(), amount: min_balance, @@ -875,7 +832,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: callee_addr.clone(), amount: min_balance, }), @@ -883,7 +840,7 
@@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: caller_addr.clone(), to: callee_addr.clone(), amount: 32768, // hard coded in wasm @@ -892,37 +849,21 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { + event: Event::Contracts(crate::Event::Instantiated { deployer: caller_addr.clone(), contract: callee_addr.clone(), }), - topics: vec![hash(&caller_addr), hash(&callee_addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: caller_addr.clone(), to: callee_addr.clone(), amount: 32768, }), topics: vec![], }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: caller_addr.clone(), - contract: callee_addr.clone(), - }), - topics: vec![hash(&caller_addr), hash(&callee_addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: caller_addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&caller_addr)], - }, ] ); }); @@ -939,7 +880,7 @@ fn delegate_call() { // Instantiate the 'caller' assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 300_000, GAS_LIMIT, None, @@ -949,14 +890,13 @@ fn delegate_call() { )); // Only upload 'callee' code assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), callee_wasm, Some(codec::Compact(100_000)), - Determinism::Deterministic, )); assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), caller_addr.clone(), 1337, GAS_LIMIT, @@ -974,7 +914,7 @@ fn 
cannot_self_destruct_through_draning() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 1_000, GAS_LIMIT, None, @@ -985,12 +925,12 @@ fn cannot_self_destruct_through_draning() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - get_contract(&addr); + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); // Call BOB which makes it send all funds to the zero address // The contract code asserts that the transfer was successful assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1015,7 +955,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -1026,26 +966,25 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), min_balance); - assert_eq!(get_contract(&addr).extra_deposit(), 0); + let info = ContractInfoOf::::get(&addr).unwrap(); + assert_eq!(info.storage_deposit, min_balance); assert_eq!(::Currency::total_balance(&addr), min_balance); - // Create 100 bytes of storage with a price of per byte and a single storage item of price 2 + // Create 100 bytes of storage with a price of per byte assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, 100u32.to_le_bytes().to_vec() )); - assert_eq!(get_contract(&addr).total_deposit(), min_balance + 102); - // Increase the byte price and trigger a refund. 
This should not have any influence because - // the removal is pro rata and exactly those 100 bytes should have been removed. + // Increase the byte price and trigger a refund. This could potentially destroy the account + // because the refund removes the reserved existential deposit. This should not happen. DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1056,9 +995,8 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { // Make sure the account wasn't removed by the refund assert_eq!( ::Currency::total_balance(&addr), - get_contract(&addr).total_deposit(), + ::Currency::minimum_balance(), ); - assert_eq!(get_contract(&addr).extra_deposit(), 2,); }); } @@ -1070,7 +1008,7 @@ fn cannot_self_destruct_by_refund_after_slash() { let min_balance = ::Currency::minimum_balance(); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -1082,7 +1020,7 @@ fn cannot_self_destruct_by_refund_after_slash() { // create 100 more reserved balance assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1100,7 +1038,7 @@ fn cannot_self_destruct_by_refund_after_slash() { // trigger a refund of 50 which would bring the contract below min when actually refunded assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1111,12 +1049,15 @@ fn cannot_self_destruct_by_refund_after_slash() { // Make sure the account kept the minimum balance and was not destroyed assert_eq!(::Currency::total_balance(&addr), min_balance); + // even though it was not charged it is still substracted from the storage deposit tracker + assert_eq!(ContractInfoOf::::get(&addr).unwrap().storage_deposit, 550); + assert_eq!( System::events(), vec![ EventRecord { phase: Phase::Initialization, - 
event: RuntimeEvent::Balances(pallet_balances::Event::Slashed { + event: Event::Balances(pallet_balances::Event::Slashed { who: addr.clone(), amount: 90, }), @@ -1124,15 +1065,7 @@ fn cannot_self_destruct_by_refund_after_slash() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::ReserveRepatriated { + event: Event::Balances(pallet_balances::Event::ReserveRepatriated { from: addr.clone(), to: ALICE, amount: 10, @@ -1153,7 +1086,7 @@ fn cannot_self_destruct_while_live() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1164,24 +1097,17 @@ fn cannot_self_destruct_while_live() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - get_contract(&addr); + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); // Call BOB with input data, forcing it make a recursive call to itself to // self-destruct, resulting in a trap. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![0], - ), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![0],), Error::::ContractTrapped, ); // Check that BOB is still there. - get_contract(&addr); + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); }); } @@ -1194,7 +1120,7 @@ fn self_destruct_works() { // Instantiate the BOB contract. 
assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1205,14 +1131,14 @@ fn self_destruct_works() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - get_contract(&addr); + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); // Drop all previous events initialize_block(2); // Call BOB without input data which triggers termination. assert_matches!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), Ok(_) ); @@ -1220,7 +1146,7 @@ fn self_destruct_works() { assert_refcount!(&code_hash, 0); // Check that account is gone - assert!(get_contract_checked(&addr).is_none()); + assert!(ContractInfoOf::::get(&addr).is_none()); assert_eq!(Balances::total_balance(&addr), 0); // check that the beneficiary (django) got remaining balance @@ -1231,7 +1157,7 @@ fn self_destruct_works() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: addr.clone(), to: DJANGO, amount: 100_000, @@ -1240,30 +1166,22 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Terminated { + event: Event::Contracts(crate::Event::Terminated { contract: addr.clone(), beneficiary: DJANGO }), - topics: vec![hash(&addr), hash(&DJANGO)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::KilledAccount { + event: Event::System(frame_system::Event::KilledAccount { account: 
addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::ReserveRepatriated { + event: Event::Balances(pallet_balances::Event::ReserveRepatriated { from: addr.clone(), to: ALICE, amount: 1_000, @@ -1287,7 +1205,7 @@ fn destroy_contract_and_transfer_funds() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 200_000, GAS_LIMIT, None, @@ -1299,7 +1217,7 @@ fn destroy_contract_and_transfer_funds() { // This deploys the BOB contract, which in turn deploys the CHARLIE contract during // construction. assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 200_000, GAS_LIMIT, None, @@ -1311,11 +1229,11 @@ fn destroy_contract_and_transfer_funds() { let addr_charlie = Contracts::contract_address(&addr_bob, &callee_code_hash, &[0x47, 0x11]); // Check that the CHARLIE contract has been instantiated. - get_contract(&addr_charlie); + assert_matches!(ContractInfoOf::::get(&addr_charlie), Some(_)); // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr_bob, 0, GAS_LIMIT, @@ -1324,7 +1242,7 @@ fn destroy_contract_and_transfer_funds() { )); // Check that CHARLIE has moved on to the great beyond (ie. died). - assert!(get_contract_checked(&addr_charlie).is_none()); + assert!(ContractInfoOf::::get(&addr_charlie).is_none()); }); } @@ -1337,7 +1255,7 @@ fn cannot_self_destruct_in_constructor() { // Fail to instantiate the BOB because the contructor calls seal_terminate. assert_err_ignore_postinfo!( Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1359,7 +1277,7 @@ fn crypto_hashes() { // Instantiate the CRYPTO_HASHES contract. 
assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1389,18 +1307,10 @@ fn crypto_hashes() { // We offset data in the contract tables by 1. let mut params = vec![(n + 1) as u8]; params.extend_from_slice(input); - let result = >::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - params, - false, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = + >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, params, false) + .result + .unwrap(); assert!(!result.did_revert()); let expected = hash_fn(input.as_ref()); assert_eq!(&result.data[..*expected_size], &*expected); @@ -1416,7 +1326,7 @@ fn transfer_return_code() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1428,18 +1338,9 @@ fn transfer_return_code() { // Contract has only the minimal balance so any transfer will fail. Balances::make_free_balance_be(&addr, min_balance); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - false, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough total balance in order to not go below the min balance @@ -1447,18 +1348,9 @@ fn transfer_return_code() { // the transfer still fails. 
Balances::make_free_balance_be(&addr, min_balance + 100); Balances::reserve(&addr, min_balance + 100).unwrap(); - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - false, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, None, vec![], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1473,7 +1365,7 @@ fn call_return_code() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1493,14 +1385,13 @@ fn call_return_code() { None, AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), false, - Determinism::Deterministic, ) .result .unwrap(); assert_return_code!(result, RuntimeReturnCode::NotCallable); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(CHARLIE), + Origin::signed(CHARLIE), min_balance * 100, GAS_LIMIT, None, @@ -1524,7 +1415,6 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1547,7 +1437,6 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1567,7 +1456,6 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1586,7 +1474,6 @@ fn call_return_code() { .cloned() .collect(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1605,7 +1492,7 @@ fn instantiate_return_code() { let callee_hash = callee_hash.as_ref().to_vec(); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1615,7 +1502,7 @@ fn instantiate_return_code() { ),); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ 
-1635,7 +1522,6 @@ fn instantiate_return_code() { None, callee_hash.clone(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1654,7 +1540,6 @@ fn instantiate_return_code() { None, callee_hash.clone(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1662,18 +1547,10 @@ fn instantiate_return_code() { // Contract has enough balance but the passed code hash is invalid Balances::make_free_balance_be(&addr, min_balance + 10_000); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![0; 33], - false, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![0; 33], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. @@ -1685,7 +1562,6 @@ fn instantiate_return_code() { None, callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1700,7 +1576,6 @@ fn instantiate_return_code() { None, callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1717,7 +1592,7 @@ fn disabled_chain_extension_wont_deploy() { TestExtension::disable(); assert_err_ignore_postinfo!( Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 3 * min_balance, GAS_LIMIT, None, @@ -1737,7 +1612,7 @@ fn disabled_chain_extension_errors_on_call() { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1748,7 +1623,7 @@ fn disabled_chain_extension_errors_on_call() { let addr = Contracts::contract_address(&ALICE, &hash, &[]); TestExtension::disable(); assert_err_ignore_postinfo!( - 
Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), Error::::NoChainExtension, ); }); @@ -1761,7 +1636,7 @@ fn chain_extension_works() { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1773,18 +1648,10 @@ fn chain_extension_works() { // 0 = read input buffer and pass it through as output let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone(), - false, - Determinism::Deterministic, - ); + let result = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); - assert_eq!(result.result.unwrap().data, input); + assert_eq!(result.result.unwrap().data, Bytes(input)); // 1 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( @@ -1795,7 +1662,6 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), false, - Determinism::Deterministic, ) .result .unwrap(); @@ -1811,7 +1677,6 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), false, - Determinism::Deterministic, ); assert_ok!(result.result); let gas_consumed = result.gas_consumed; @@ -1823,10 +1688,9 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), false, - Determinism::Deterministic, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); + assert_eq!(result.gas_consumed, gas_consumed + 42); let result = Contracts::bare_call( ALICE, 
addr.clone(), @@ -1835,10 +1699,9 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), false, - Determinism::Deterministic, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); + assert_eq!(result.gas_consumed, gas_consumed + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( @@ -1849,12 +1712,11 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), false, - Determinism::Deterministic, ) .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, vec![42, 99]); + assert_eq!(result.data, Bytes(vec![42, 99])); // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer // We set the MSB part to 1 (instead of 0) which routes the request into the second @@ -1867,18 +1729,17 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), false, - Determinism::Deterministic, ) .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, vec![0x4B, 0x1D]); + assert_eq!(result.data, Bytes(vec![0x4B, 0x1D])); // Diverging to third chain extension that is disabled // We set the MSB part to 2 (instead of 0) which routes the request into the third extension assert_err_ignore_postinfo!( Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1897,7 +1758,7 @@ fn chain_extension_temp_storage_works() { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1917,17 +1778,8 @@ fn chain_extension_temp_storage_works() { ); assert_ok!( - Contracts::bare_call( - ALICE, - 
addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone(), - false, - Determinism::Deterministic - ) - .result + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false) + .result ); }) } @@ -1940,7 +1792,7 @@ fn lazy_removal_works() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1950,7 +1802,7 @@ fn lazy_removal_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = get_contract(&addr); + let info = >::get(&addr).unwrap(); let trie = &info.child_trie_info(); // Put value into the contracts child trie @@ -1958,7 +1810,7 @@ fn lazy_removal_works() { // Terminate the contract assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1973,7 +1825,7 @@ fn lazy_removal_works() { assert_matches!(child::get(trie, &[99]), Some(42)); // Run the lazy removal - Contracts::on_idle(System::block_number(), Weight::MAX); + Contracts::on_idle(System::block_number(), Weight::max_value()); // Value should be gone now assert_matches!(child::get::(trie, &[99]), None); @@ -2009,7 +1861,7 @@ fn lazy_batch_removal_works() { for i in 0..3u8 { assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2019,7 +1871,7 @@ fn lazy_batch_removal_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[i]); - let info = get_contract(&addr); + let info = >::get(&addr).unwrap(); let trie = &info.child_trie_info(); // Put value into the contracts child trie @@ -2028,7 +1880,7 @@ fn lazy_batch_removal_works() { // Terminate the contract. Contract info should be gone, but value should be still there // as the lazy removal did not run, yet. 
assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -2043,7 +1895,7 @@ fn lazy_batch_removal_works() { } // Run single lazy removal - Contracts::on_idle(System::block_number(), Weight::MAX); + Contracts::on_idle(System::block_number(), Weight::max_value()); // The single lazy removal should have removed all queued tries for trie in tries.iter() { @@ -2058,7 +1910,7 @@ fn lazy_removal_partial_remove_works() { // We create a contract with some extra keys above the weight limit let extra_keys = 7u32; - let weight_limit = Weight::from_ref_time(5_000_000_000); + let weight_limit = 5_000_000_000; let (_, max_keys) = Storage::::deletion_budget(1, weight_limit); let vals: Vec<_> = (0..max_keys + extra_keys) .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) @@ -2071,7 +1923,7 @@ fn lazy_removal_partial_remove_works() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2081,7 +1933,7 @@ fn lazy_removal_partial_remove_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = get_contract(&addr); + let info = >::get(&addr).unwrap(); // Put value into the contracts child trie for val in &vals { @@ -2098,7 +1950,7 @@ fn lazy_removal_partial_remove_works() { // Terminate the contract assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -2183,7 +2035,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2193,7 +2045,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let 
info = get_contract(&addr); + let info = >::get(&addr).unwrap(); let trie = &info.child_trie_info(); // Put value into the contracts child trie @@ -2201,7 +2053,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { // Terminate the contract assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -2232,7 +2084,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { assert_matches!(child::get::(trie, &[99]), Some(42)); // Run on_idle with max remaining weight, this should remove the value - Contracts::on_idle(System::block_number(), Weight::MAX); + Contracts::on_idle(System::block_number(), Weight::max_value()); // Value should be gone assert_matches!(child::get::(trie, &[99]), None); @@ -2243,7 +2095,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { fn lazy_removal_does_not_use_all_weight() { let (code, hash) = compile_module::("self_destruct").unwrap(); - let weight_limit = Weight::from_ref_time(5_000_000_000); + let weight_limit = 5_000_000_000; let mut ext = ExtBuilder::default().existential_deposit(50).build(); let (trie, vals, weight_per_key) = ext.execute_with(|| { @@ -2251,7 +2103,7 @@ fn lazy_removal_does_not_use_all_weight() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2261,7 +2113,7 @@ fn lazy_removal_does_not_use_all_weight() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = get_contract(&addr); + let info = >::get(&addr).unwrap(); let (weight_per_key, max_keys) = Storage::::deletion_budget(1, weight_limit); // We create a contract with one less storage item than we can remove within the limit @@ -2284,7 +2136,7 @@ fn lazy_removal_does_not_use_all_weight() { // Terminate the contract assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, 
GAS_LIMIT, @@ -2314,7 +2166,7 @@ fn lazy_removal_does_not_use_all_weight() { let weight_used = Storage::::process_deletion_queue_batch(weight_limit); // We have one less key in our trie than our weight limit suffices for - assert_eq!(weight_used, weight_limit - Weight::from_ref_time(weight_per_key)); + assert_eq!(weight_used, weight_limit - weight_per_key); // All the keys are removed for val in vals { @@ -2331,7 +2183,7 @@ fn deletion_queue_full() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2347,12 +2199,12 @@ fn deletion_queue_full() { // Terminate the contract should fail assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), Error::::DeletionQueueFull, ); // Contract should exist because removal failed - get_contract(&addr); + >::get(&addr).unwrap(); }); } @@ -2365,7 +2217,7 @@ fn refcounter() { // Create two contracts with the same code and check that they do in fact share it. 
assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2374,7 +2226,7 @@ fn refcounter() { vec![0], )); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2386,7 +2238,7 @@ fn refcounter() { // Sharing should also work with the usual instantiate call assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2402,39 +2254,18 @@ fn refcounter() { let addr2 = Contracts::contract_address(&ALICE, &code_hash, &[2]); // Terminating one contract should decrement the refcount - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr0, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr0, 0, GAS_LIMIT, None, vec![])); assert_refcount!(code_hash, 2); // remove another one - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr1, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr1, 0, GAS_LIMIT, None, vec![])); assert_refcount!(code_hash, 1); // Pristine code should still be there crate::PristineCode::::get(code_hash).unwrap(); // remove the last contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr2, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr2, 0, GAS_LIMIT, None, vec![])); assert_refcount!(code_hash, 0); // refcount is `0` but code should still exists because it needs to be removed manually @@ -2453,7 +2284,7 @@ fn reinstrument_does_charge() { let code_len = wasm.len() as u32; assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2466,28 +2297,12 @@ fn reinstrument_does_charge() { // Call the contract two times without reinstrument - let result0 = 
Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - zero.clone(), - false, - Determinism::Deterministic, - ); + let result0 = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); assert!(!result0.result.unwrap().did_revert()); - let result1 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - zero.clone(), - false, - Determinism::Deterministic, - ); + let result1 = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); assert!(!result1.result.unwrap().did_revert()); // They should match because both where called with the same schedule. @@ -2500,22 +2315,13 @@ fn reinstrument_does_charge() { }); // This call should trigger reinstrumentation - let result2 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - zero.clone(), - false, - Determinism::Deterministic, - ); + let result2 = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); assert!(!result2.result.unwrap().did_revert()); - assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); + assert!(result2.gas_consumed > result1.gas_consumed); assert_eq!( - result2.gas_consumed.ref_time(), - result1.gas_consumed.ref_time() + - ::WeightInfo::reinstrument(code_len).ref_time(), + result2.gas_consumed, + result1.gas_consumed + ::WeightInfo::reinstrument(code_len), ); }); } @@ -2527,7 +2333,7 @@ fn debug_message_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -2536,16 +2342,7 @@ fn debug_message_works() { vec![], ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - true, - Determinism::Deterministic, - ); + let result 
= Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, None, vec![], true); assert_matches!(result.result, Ok(_)); assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); @@ -2559,7 +2356,7 @@ fn debug_message_logging_disabled() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -2569,19 +2366,10 @@ fn debug_message_logging_disabled() { ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // disable logging by passing `false` - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - false, - Determinism::Deterministic, - ); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![], false); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging - assert_ok!(Contracts::call(RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); assert!(result.debug_message.is_empty()); }); } @@ -2593,7 +2381,7 @@ fn debug_message_invalid_utf8() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -2602,16 +2390,7 @@ fn debug_message_invalid_utf8() { vec![], ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - true, - Determinism::Deterministic, - ); + let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, None, vec![], true); assert_err!(result.result, >::DebugMessageInvalidUTF8); }); } @@ -2626,7 +2405,7 @@ fn 
gas_estimation_nested_call_fixed_limit() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2637,7 +2416,7 @@ fn gas_estimation_nested_call_fixed_limit() { let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2650,7 +2429,7 @@ fn gas_estimation_nested_call_fixed_limit() { let input: Vec = AsRef::<[u8]>::as_ref(&addr_callee) .iter() .cloned() - .chain((GAS_LIMIT / 5).ref_time().to_le_bytes()) + .chain((GAS_LIMIT / 5).to_le_bytes()) .collect(); // Call in order to determine the gas that is required for this call @@ -2662,12 +2441,11 @@ fn gas_estimation_nested_call_fixed_limit() { None, input.clone(), false, - Determinism::Deterministic, ); assert_ok!(&result.result); // We have a subcall with a fixed gas limit. This constitutes precharging. - assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); + assert!(result.gas_required > result.gas_consumed); // Make the same call using the estimated gas. Should succeed. 
assert_ok!( @@ -2679,7 +2457,6 @@ fn gas_estimation_nested_call_fixed_limit() { Some(result.storage_deposit.charge_or_zero()), input, false, - Determinism::Deterministic, ) .result ); @@ -2689,7 +2466,6 @@ fn gas_estimation_nested_call_fixed_limit() { #[test] #[cfg(feature = "unstable-interface")] fn gas_estimation_call_runtime() { - use codec::Decode; let (caller_code, caller_hash) = compile_module::("call_runtime").unwrap(); let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -2698,7 +2474,7 @@ fn gas_estimation_call_runtime() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2709,7 +2485,7 @@ fn gas_estimation_call_runtime() { let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2721,10 +2497,10 @@ fn gas_estimation_call_runtime() { // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. 
- let call = RuntimeCall::Contracts(crate::Call::call { + let call = Call::Contracts(crate::Call::call { dest: addr_callee, value: 0, - gas_limit: GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() / 3), + gas_limit: GAS_LIMIT / 3, storage_deposit_limit: None, data: vec![], }); @@ -2736,12 +2512,10 @@ fn gas_estimation_call_runtime() { None, call.encode(), false, - Determinism::Deterministic, ); - // contract encodes the result of the dispatch runtime - let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); - assert_eq!(outcome, 0); - assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); + assert_ok!(&result.result); + + assert!(result.gas_required > result.gas_consumed); // Make the same call using the required gas. Should succeed. assert_ok!( @@ -2753,7 +2527,6 @@ fn gas_estimation_call_runtime() { None, call.encode(), false, - Determinism::Deterministic, ) .result ); @@ -2769,7 +2542,7 @@ fn ecdsa_recover() { // Instantiate the ecdsa_recover contract. 
assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -2802,20 +2575,12 @@ fn ecdsa_recover() { params.extend_from_slice(&signature); params.extend_from_slice(&message_hash); assert!(params.len() == 65 + 32); - let result = >::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - params, - false, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = + >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, params, false) + .result + .unwrap(); assert!(!result.did_revert()); - assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); + assert_eq!(result.data.as_ref(), &EXPECTED_COMPRESSED_PUBLIC_KEY); }) } @@ -2831,10 +2596,9 @@ fn upload_code_works() { assert!(!>::contains_key(code_hash)); assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), wasm, - Some(codec::Compact(1_000)), - Determinism::Deterministic, + Some(codec::Compact(1_000)) )); assert!(>::contains_key(code_hash)); @@ -2843,16 +2607,16 @@ fn upload_code_works() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 241, + amount: 240, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], + event: Event::Contracts(crate::Event::CodeStored { code_hash }), + topics: vec![], }, ] ); @@ -2870,12 +2634,7 @@ fn upload_code_limit_too_low() { initialize_block(2); assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - Some(codec::Compact(100)), - Determinism::Deterministic - ), + Contracts::upload_code(Origin::signed(ALICE), wasm, Some(codec::Compact(100))), >::StorageDepositLimitExhausted, ); @@ -2894,12 +2653,7 @@ fn upload_code_not_enough_balance() { initialize_block(2); assert_noop!( - 
Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - Some(codec::Compact(1_000)), - Determinism::Deterministic - ), + Contracts::upload_code(Origin::signed(ALICE), wasm, Some(codec::Compact(1_000))), >::StorageDepositNotEnoughFunds, ); @@ -2918,14 +2672,13 @@ fn remove_code_works() { initialize_block(2); assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), wasm, - Some(codec::Compact(1_000)), - Determinism::Deterministic, + Some(codec::Compact(1_000)) )); assert!(>::contains_key(code_hash)); - assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); + assert_ok!(Contracts::remove_code(Origin::signed(ALICE), code_hash)); assert!(!>::contains_key(code_hash)); assert_eq!( @@ -2933,29 +2686,29 @@ fn remove_code_works() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 241, + amount: 240, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], + event: Event::Contracts(crate::Event::CodeStored { code_hash }), + topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Unreserved { + event: Event::Balances(pallet_balances::Event::Unreserved { who: ALICE, - amount: 241, + amount: 240, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeRemoved { code_hash }), - topics: vec![code_hash], + event: Event::Contracts(crate::Event::CodeRemoved { code_hash }), + topics: vec![], }, ] ); @@ -2973,14 +2726,13 @@ fn remove_code_wrong_origin() { initialize_block(2); assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), wasm, - Some(codec::Compact(1_000)), - Determinism::Deterministic, + 
Some(codec::Compact(1_000)) )); assert_noop!( - Contracts::remove_code(RuntimeOrigin::signed(BOB), code_hash), + Contracts::remove_code(Origin::signed(BOB), code_hash), sp_runtime::traits::BadOrigin, ); @@ -2989,16 +2741,16 @@ fn remove_code_wrong_origin() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 241, + amount: 240, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], + event: Event::Contracts(crate::Event::CodeStored { code_hash }), + topics: vec![], }, ] ); @@ -3013,7 +2765,7 @@ fn remove_code_in_use() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3026,7 +2778,7 @@ fn remove_code_in_use() { initialize_block(2); assert_noop!( - Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), + Contracts::remove_code(Origin::signed(ALICE), code_hash), >::CodeInUse, ); @@ -3045,7 +2797,7 @@ fn remove_code_not_found() { initialize_block(2); assert_noop!( - Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), + Contracts::remove_code(Origin::signed(ALICE), code_hash), >::CodeNotFound, ); @@ -3065,7 +2817,7 @@ fn instantiate_with_zero_balance_works() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3076,7 +2828,7 @@ fn instantiate_with_zero_balance_works() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. 
- get_contract(&addr); + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); // Make sure the account exists even though no free balance was send assert_eq!(::Currency::free_balance(&addr), 0,); @@ -3090,14 +2842,12 @@ fn instantiate_with_zero_balance_works() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: addr.clone() - }), + event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + event: Event::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance, }), @@ -3105,7 +2855,7 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance, @@ -3114,7 +2864,7 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: min_balance, }), @@ -3122,24 +2872,24 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 241, + amount: 240, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], + event: Event::Contracts(crate::Event::CodeStored { code_hash }), + topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { + event: 
Event::Contracts(crate::Event::Instantiated { deployer: ALICE, contract: addr.clone(), }), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, ] ); @@ -3158,7 +2908,7 @@ fn instantiate_with_below_existential_deposit_works() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 50, GAS_LIMIT, None, @@ -3169,7 +2919,7 @@ fn instantiate_with_below_existential_deposit_works() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - get_contract(&addr); + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); // Make sure the account exists even though no free balance was send assert_eq!(::Currency::free_balance(&addr), 50,); @@ -3183,14 +2933,12 @@ fn instantiate_with_below_existential_deposit_works() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: addr.clone() - }), + event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + event: Event::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance, }), @@ -3198,7 +2946,7 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance, @@ -3207,7 +2955,7 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: min_balance, }), @@ -3215,7 +2963,7 @@ fn 
instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: 50, @@ -3224,24 +2972,24 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 241, + amount: 240, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![code_hash], + event: Event::Contracts(crate::Event::CodeStored { code_hash }), + topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { + event: Event::Contracts(crate::Event::Instantiated { deployer: ALICE, contract: addr.clone(), }), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, ] ); @@ -3256,7 +3004,7 @@ fn storage_deposit_works() { let mut deposit = ::Currency::minimum_balance(); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3271,7 +3019,7 @@ fn storage_deposit_works() { // Create storage assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 42, GAS_LIMIT, @@ -3281,11 +3029,11 @@ fn storage_deposit_works() { // 4 is for creating 2 storage items let charged0 = 4 + 1_000 + 5_000; deposit += charged0; - assert_eq!(get_contract(&addr).total_deposit(), deposit); + assert_eq!(>::get(&addr).unwrap().storage_deposit, deposit); // Add more storage (but also remove some) assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -3294,28 +3042,27 @@ fn storage_deposit_works() { )); let charged1 = 
1_000 - 100; deposit += charged1; - assert_eq!(get_contract(&addr).total_deposit(), deposit); + assert_eq!(>::get(&addr).unwrap().storage_deposit, deposit); // Remove more storage (but also add some) assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, (2_100u32, 900u32).encode(), )); - // -1 for numeric instability - let refunded0 = 4_000 - 100 - 1; + let refunded0 = 4_000 - 100; deposit -= refunded0; - assert_eq!(get_contract(&addr).total_deposit(), deposit); + assert_eq!(>::get(&addr).unwrap().storage_deposit, deposit); assert_eq!( System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: 42, @@ -3324,15 +3071,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: charged0, @@ -3341,7 +3080,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: charged0, }), @@ -3349,15 +3088,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: 
Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: charged1, @@ -3366,7 +3097,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { + event: Event::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: charged1, }), @@ -3374,15 +3105,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::ReserveRepatriated { + event: Event::Balances(pallet_balances::Event::ReserveRepatriated { from: addr.clone(), to: ALICE, amount: refunded0, @@ -3406,7 +3129,7 @@ fn set_code_extrinsic() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3416,65 +3139,60 @@ fn set_code_extrinsic() { )); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - new_wasm, - None, - Determinism::Deterministic - )); + assert_ok!(Contracts::upload_code(Origin::signed(ALICE), new_wasm, None,)); // Drop previous events initialize_block(2); - assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); // only root can execute this extrinsic assert_noop!( - Contracts::set_code(RuntimeOrigin::signed(ALICE), addr.clone(), new_code_hash), + Contracts::set_code(Origin::signed(ALICE), addr.clone(), new_code_hash), sp_runtime::traits::BadOrigin, ); - assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); 
assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); assert_eq!(System::events(), vec![],); // contract must exist assert_noop!( - Contracts::set_code(RuntimeOrigin::root(), BOB, new_code_hash), + Contracts::set_code(Origin::root(), BOB, new_code_hash), >::ContractNotFound, ); - assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); assert_eq!(System::events(), vec![],); // new code hash must exist assert_noop!( - Contracts::set_code(RuntimeOrigin::root(), addr.clone(), Default::default()), + Contracts::set_code(Origin::root(), addr.clone(), Default::default()), >::CodeNotFound, ); - assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); assert_eq!(System::events(), vec![],); // successful call - assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr.clone(), new_code_hash)); - assert_eq!(get_contract(&addr).code_hash, new_code_hash); + assert_ok!(Contracts::set_code(Origin::root(), addr.clone(), new_code_hash)); + assert_eq!(>::get(&addr).unwrap().code_hash, new_code_hash); assert_refcount!(&code_hash, 0); assert_refcount!(&new_code_hash, 1); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(pallet_contracts::Event::ContractCodeUpdated { - contract: addr.clone(), + event: Event::Contracts(pallet_contracts::Event::ContractCodeUpdated { + contract: addr, new_code_hash, old_code_hash: code_hash, }), - topics: vec![hash(&addr), new_code_hash, code_hash], + topics: vec![], },] ); }); @@ -3488,7 +3206,7 @@ fn call_after_killed_account_needs_funding() { let min_balance = ::Currency::minimum_balance(); assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 700, GAS_LIMIT, None, @@ -3503,7 
+3221,7 @@ fn call_after_killed_account_needs_funding() { // Destroy the account of the contract by slashing. // Slashing can actually happen if the contract takes part in staking. - // It is a corner case and we accept the destruction of the account. + // It is a corner case and we except the destruction of the account. let _ = ::Currency::slash( &addr, ::Currency::total_balance(&addr), @@ -3513,7 +3231,7 @@ fn call_after_killed_account_needs_funding() { // account in order to send balance there. assert_err_ignore_postinfo!( Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), min_balance - 1, GAS_LIMIT, @@ -3525,7 +3243,7 @@ fn call_after_killed_account_needs_funding() { // Sending zero should work as it does not do a transfer assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -3535,7 +3253,7 @@ fn call_after_killed_account_needs_funding() { // Sending minimum balance should work assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), addr.clone(), min_balance, GAS_LIMIT, @@ -3548,14 +3266,14 @@ fn call_after_killed_account_needs_funding() { vec![ EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::KilledAccount { + event: Event::System(frame_system::Event::KilledAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Slashed { + event: Event::Balances(pallet_balances::Event::Slashed { who: addr.clone(), amount: min_balance + 700 }), @@ -3563,22 +3281,12 @@ fn call_after_killed_account_needs_funding() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: 
RuntimeEvent::System(frame_system::Event::NewAccount { - account: addr.clone() - }), + event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + event: Event::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance }), @@ -3586,21 +3294,13 @@ fn call_after_killed_account_needs_funding() { }, EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + event: Event::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance }), topics: vec![], }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&addr)], - }, ] ); }); @@ -3617,17 +3317,12 @@ fn contract_reverted() { let input = (flags.bits(), buffer).encode(); // We just upload the code for later use - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Deterministic - )); + assert_ok!(Contracts::upload_code(Origin::signed(ALICE), wasm.clone(), None)); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( Contracts::instantiate( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3641,7 +3336,7 @@ fn contract_reverted() { // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3669,7 +3364,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.result.flags, flags); - assert_eq!(result.result.data, buffer); + assert_eq!(result.result.data.0, buffer); assert!(!>::contains_key(result.account_id)); // Pass empty flags and therefore successfully instantiate the 
contract for later use. @@ -3689,32 +3384,16 @@ fn contract_reverted() { // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone() - ), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, input.clone()), >::ContractReverted, ); // Calling directly: revert leads to success but the flags indicate the error - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input, - false, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input, false) + .result + .unwrap(); assert_eq!(result.flags, flags); - assert_eq!(result.data, buffer); + assert_eq!(result.data.0, buffer); }); } @@ -3725,12 +3404,7 @@ fn code_rejected_error_works() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Deterministic - ), + Contracts::upload_code(Origin::signed(ALICE), wasm.clone(), None), >::CodeRejected, ); @@ -3739,7 +3413,7 @@ fn code_rejected_error_works() { 0, GAS_LIMIT, None, - Code::Upload(wasm), + Code::Upload(Bytes(wasm)), vec![], vec![], true, @@ -3764,7 +3438,7 @@ fn set_code_hash() { // Instantiate the 'caller' assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), + Origin::signed(ALICE), 300_000, GAS_LIMIT, None, @@ -3773,14 +3447,7 @@ fn set_code_hash() { vec![], )); // upload new code - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - new_wasm.clone(), - None, - Determinism::Deterministic - )); - - System::reset_events(); + assert_ok!(Contracts::upload_code(Origin::signed(ALICE), new_wasm.clone(), None)); // First call sets new code_hash and returns 1 let result = Contracts::bare_call( @@ -3791,595 +3458,30 @@ fn set_code_hash() { None, 
new_code_hash.as_ref().to_vec(), true, - Determinism::Deterministic, ) .result .unwrap(); assert_return_code!(result, 1); // Second calls new contract code that returns 2 - let result = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - true, - Determinism::Deterministic, - ) - .result - .unwrap(); + let result = + Contracts::bare_call(ALICE, contract_addr.clone(), 0, GAS_LIMIT, None, vec![], true) + .result + .unwrap(); assert_return_code!(result, 2); // Checking for the last event only assert_eq!( - &System::events(), - &[ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::ContractCodeUpdated { - contract: contract_addr.clone(), - new_code_hash, - old_code_hash: code_hash, - }), - topics: vec![hash(&contract_addr), new_code_hash, code_hash], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: contract_addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&contract_addr)], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: ALICE, - contract: contract_addr.clone(), - }), - topics: vec![hash(&ALICE), hash(&contract_addr)], - }, - ], - ); - }); -} - -#[test] -fn storage_deposit_limit_is_enforced() { - let (wasm, code_hash) = compile_module::("store").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let min_balance = ::Currency::minimum_balance(); - - // Instantiate the BOB contract. 
- assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - - // Check that the BOB contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), min_balance); - assert_eq!(::Currency::total_balance(&addr), min_balance); - - // Create 100 bytes of storage with a price of per byte - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(1)), - 100u32.to_le_bytes().to_vec() - ), - >::StorageDepositLimitExhausted, - ); - }); -} - -#[test] -fn storage_deposit_limit_is_enforced_late() { - let (wasm_caller, code_hash_caller) = - compile_module::("create_storage_and_call").unwrap(); - let (wasm_callee, code_hash_callee) = compile_module::("store").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - // Create both contracts: Constructors do nothing. - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm_caller, - vec![], - vec![], - )); - let addr_caller = Contracts::contract_address(&ALICE, &code_hash_caller, &[]); - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm_callee, - vec![], - vec![], - )); - let addr_callee = Contracts::contract_address(&ALICE, &code_hash_callee, &[]); - - // Create 100 bytes of storage with a price of per byte - // This is 100 Balance + 2 Balance for the item - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_callee.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(102)), - 100u32.to_le_bytes().to_vec() - )); - - // We do not remove any storage but require 14 bytes of storage for the new - // storage created in the immediate contract. 
- assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(5)), - 100u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), - ), - >::StorageDepositLimitExhausted, - ); - - // Allow for the additional 14 bytes but demand an additional byte in the callee contract. - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(14)), - 101u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), - ), - >::StorageDepositLimitExhausted, - ); - - // Refund in the callee contract but not enough to cover the 14 balance required by the - // caller. - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(0)), - 87u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), - ), - >::StorageDepositLimitExhausted, - ); - - let _ = Balances::make_free_balance_be(&ALICE, 1_000); - - // Send more than the sender has balance. - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(50)), - 1_200u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), - ), - >::StorageDepositLimitExhausted, - ); - - // Same as above but allow for the additional balance. 
- assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(1)), - 87u32 - .to_le_bytes() - .as_ref() - .iter() - .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) - .cloned() - .collect(), - )); - }); -} - -#[test] -fn deposit_limit_honors_liquidity_restrictions() { - let (wasm, code_hash) = compile_module::("store").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000); - let min_balance = ::Currency::minimum_balance(); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - - // Check that the contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), min_balance); - assert_eq!(::Currency::total_balance(&addr), min_balance); - - // check that the lock ins honored - Balances::set_lock([0; 8], &BOB, 1_000, WithdrawReasons::TRANSFER); - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(200)), - 100u32.to_le_bytes().to_vec() - ), - >::StorageDepositNotEnoughFunds, - ); - assert_eq!(Balances::free_balance(&BOB), 1_000); - }); -} - -#[test] -fn deposit_limit_honors_existential_deposit() { - let (wasm, code_hash) = compile_module::("store").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000); - let min_balance = ::Currency::minimum_balance(); - - // Instantiate the BOB contract. 
- assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - - // Check that the contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), min_balance); - assert_eq!(::Currency::total_balance(&addr), min_balance); - - // check that the deposit can't bring the account below the existential deposit - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(900)), - 100u32.to_le_bytes().to_vec() - ), - >::StorageDepositNotEnoughFunds, - ); - assert_eq!(Balances::free_balance(&BOB), 1_000); - }); -} - -#[test] -fn deposit_limit_honors_min_leftover() { - let (wasm, code_hash) = compile_module::("store").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&BOB, 1_000); - let min_balance = ::Currency::minimum_balance(); - - // Instantiate the BOB contract. 
- assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - - // Check that the contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), min_balance); - assert_eq!(::Currency::total_balance(&addr), min_balance); - - // check that the minumum leftover (value send) is considered - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 400, - GAS_LIMIT, - Some(codec::Compact(500)), - 100u32.to_le_bytes().to_vec() - ), - >::StorageDepositNotEnoughFunds, - ); - assert_eq!(Balances::free_balance(&BOB), 1_000); - }); -} - -#[test] -fn cannot_instantiate_indeterministic_code() { - let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); - let (caller_wasm, _) = compile_module::("instantiate_return_code").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - // Try to instantiate directly from code - assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm.clone(), - vec![], - vec![], - ), - >::CodeRejected, - ); - assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![], - false, - ) - .result, - >::CodeRejected, - ); - - // Try to upload a non deterministic code as deterministic - assert_err!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Deterministic - ), - >::CodeRejected, - ); - - // Try to instantiate from already stored indeterministic code hash - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - None, - Determinism::AllowIndeterminism, - )); - assert_err_ignore_postinfo!( - Contracts::instantiate( - 
RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), - >::Indeterministic, - ); - assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - false, - ) - .result, - >::Indeterministic, - ); - - // Deploy contract which instantiates another contract - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - false, - ) - .result - .unwrap() - .account_id; - - // Try to instantiate `code_hash` from another contract in deterministic mode - assert_err!( - >::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - code_hash.encode(), - false, - Determinism::Deterministic, - ) - .result, - >::Indeterministic, - ); - - // Instantiations are not allowed even in non determinism mode - assert_err!( - >::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - code_hash.encode(), - false, - Determinism::AllowIndeterminism, - ) - .result, - >::Indeterministic, - ); - }); -} - -#[test] -fn cannot_set_code_indeterministic_code() { - let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); - let (caller_wasm, _) = compile_module::("set_code_hash").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - // Put the non deterministic contract on-chain - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - None, - Determinism::AllowIndeterminism, - )); - - // Create the contract that will call `seal_set_code_hash` - let caller_addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - false, - ) - .result - .unwrap() - .account_id; - - // We do not allow to set the code hash to a non determinstic wasm - assert_err!( - >::bare_call( - ALICE, - caller_addr.clone(), - 0, - GAS_LIMIT, - None, - 
code_hash.encode(), - false, - Determinism::AllowIndeterminism, - ) - .result, - >::Indeterministic, - ); - }); -} - -#[test] -fn delegate_call_indeterministic_code() { - let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); - let (caller_wasm, _) = compile_module::("delegate_call_simple").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - // Put the non deterministic contract on-chain - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - None, - Determinism::AllowIndeterminism, - )); - - // Create the contract that will call `seal_delegate_call` - let caller_addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - false, - ) - .result - .unwrap() - .account_id; - - // The delegate call will fail in deterministic mode - assert_err!( - >::bare_call( - ALICE, - caller_addr.clone(), - 0, - GAS_LIMIT, - None, - code_hash.encode(), - false, - Determinism::Deterministic, - ) - .result, - >::Indeterministic, - ); - - // The delegate call will work on non deterministic mode - assert_ok!( - >::bare_call( - ALICE, - caller_addr.clone(), - 0, - GAS_LIMIT, - None, - code_hash.encode(), - false, - Determinism::AllowIndeterminism, - ) - .result + System::events().pop().unwrap(), + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::ContractCodeUpdated { + contract: contract_addr.clone(), + new_code_hash: new_code_hash.clone(), + old_code_hash: code_hash.clone(), + }), + topics: vec![], + }, ); }); } diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 3ede6db6db5a1..10de436bfb155 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -39,11 +39,9 @@ use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, traits::{Get, ReservableCurrency}, - 
WeakBoundedVec, }; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::BadOrigin; -use sp_std::vec; /// Put the instrumented module in storage. /// @@ -98,7 +96,7 @@ where >::insert(&code_hash, orig_code); >::insert(&code_hash, owner_info); *existing = Some(module); - >::deposit_event(vec![code_hash], Event::CodeStored { code_hash }); + >::deposit_event(Event::CodeStored { code_hash }); Ok(()) }, }) @@ -135,10 +133,7 @@ pub fn increment_refcount(code_hash: CodeHash) -> Result<(), Dispa } /// Try to remove code together with all associated information. -pub fn try_remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult -where - T::AccountId: UncheckedFrom + AsRef<[u8]>, -{ +pub fn try_remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { >::try_mutate_exists(&code_hash, |existing| { if let Some(owner_info) = existing { ensure!(owner_info.refcount == 0, >::CodeInUse); @@ -147,7 +142,7 @@ where *existing = None; >::remove(&code_hash); >::remove(&code_hash); - >::deposit_event(vec![code_hash], Event::CodeRemoved { code_hash }); + >::deposit_event(Event::CodeRemoved { code_hash }); Ok(()) } else { Err(>::CodeNotFound.into()) @@ -196,15 +191,10 @@ pub fn reinstrument( let original_code = >::get(&prefab_module.code_hash).ok_or(Error::::CodeNotFound)?; let original_code_len = original_code.len(); - // We need to allow contracts growing too big after re-instrumentation. Otherwise - // the contract can become inaccessible. The user has no influence over this size - // as the contract is already deployed and every change in size would be the result - // of changes in the instrumentation algorithm controlled by the chain authors. 
- prefab_module.code = WeakBoundedVec::force_from( - prepare::reinstrument_contract::(&original_code, schedule, prefab_module.determinism) - .map_err(|_| >::CodeRejected)?, - Some("Contract exceeds limit after re-instrumentation."), - ); + prefab_module.code = prepare::reinstrument_contract::(&original_code, schedule) + .map_err(|_| >::CodeRejected)? + .try_into() + .map_err(|_| >::CodeTooLarge)?; prefab_module.instruction_weights_version = schedule.instruction_weights.version; >::insert(&prefab_module.code_hash, &*prefab_module); Ok(original_code_len as u32) @@ -225,14 +215,17 @@ impl Token for CodeToken { use self::CodeToken::*; // In case of `Load` we already covered the general costs of // calling the storage but still need to account for the actual size of the - // contract code. This is why we subtract `T::*::(0)`. We need to do this at this + // contract code. This is why we substract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. match *self { Reinstrument(len) => T::WeightInfo::reinstrument(len), - Load(len) => T::WeightInfo::call_with_code_per_byte(len) - .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)) - .set_proof_size(len.into()), + Load(len) => { + let computation = T::WeightInfo::call_with_code_per_byte(len) + .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); + let bandwith = T::ContractAccessWeight::get().saturating_mul(len.into()); + computation.max(bandwith) + }, } } } diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs new file mode 100644 index 0000000000000..aa5a1626681f4 --- /dev/null +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -0,0 +1,396 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Definition of macros that hides boilerplate of defining external environment +//! for a wasm module. +//! +//! Most likely you should use `define_env` macro. + +macro_rules! convert_args { + () => (vec![]); + ( $( $t:ty ),* ) => ( vec![ $( { use $crate::wasm::env_def::ConvertibleToWasm; <$t>::VALUE_TYPE }, )* ] ); +} + +macro_rules! gen_signature { + ( ( $( $params: ty ),* ) ) => ( + { + wasm_instrument::parity_wasm::elements::FunctionType::new( + convert_args!($($params),*), vec![], + ) + } + ); + + ( ( $( $params: ty ),* ) -> $returns: ty ) => ( + { + wasm_instrument::parity_wasm::elements::FunctionType::new( + convert_args!($($params),*), + vec![{use $crate::wasm::env_def::ConvertibleToWasm; <$returns>::VALUE_TYPE}], + ) + } + ); +} + +macro_rules! 
gen_signature_dispatch { + ( + $needle_module:ident, + $needle_name:ident, + $needle_sig:ident ; + $module:ident, + $name:ident + ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* + ) => { + let module = stringify!($module).as_bytes(); + if module == $needle_module && stringify!($name).as_bytes() == $needle_name { + let signature = gen_signature!( ( $( $params ),* ) $( -> $returns )* ); + if $needle_sig == &signature { + return true; + } + } else { + gen_signature_dispatch!($needle_module, $needle_name, $needle_sig ; $($rest)*); + } + }; + ( $needle_module:ident, $needle_name:ident, $needle_sig:ident ; ) => {}; +} + +/// Unmarshall arguments and then execute `body` expression and return its result. +macro_rules! unmarshall_then_body { + ( $body:tt, $ctx:ident, $args_iter:ident, $( $names:ident : $params:ty ),* ) => ({ + $( + let $names : <$params as $crate::wasm::env_def::ConvertibleToWasm>::NativeType = + $args_iter.next() + .and_then(|v| <$params as $crate::wasm::env_def::ConvertibleToWasm> + ::from_typed_value(v.clone())) + .expect( + "precondition: all imports should be checked against the signatures of corresponding + functions defined by `define_env!` macro by the user of the macro; + signatures of these functions defined by `$params`; + calls always made with arguments types of which are defined by the corresponding imports; + thus types of arguments should be equal to type list in `$params` and + length of argument list and $params should be equal; + thus this can never be `None`; + qed; + " + ); + )* + $body + }) +} + +/// Since we can't specify the type of closure directly at binding site: +/// +/// ```nocompile +/// let f: FnOnce() -> Result<::NativeType, _> = || { /* ... */ }; +/// ``` +/// +/// we use this function to constrain the type of the closure. +#[inline(always)] +pub fn constrain_closure(f: F) -> F +where + F: FnOnce() -> Result, +{ + f +} + +macro_rules! 
unmarshall_then_body_then_marshall { + ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) -> $returns:ty => $body:tt ) => ({ + let body = $crate::wasm::env_def::macros::constrain_closure::< + <$returns as $crate::wasm::env_def::ConvertibleToWasm>::NativeType, _ + >(|| { + unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) + }); + let r = body().map_err(|reason| { + $ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; + return Ok(sp_sandbox::ReturnValue::Value({ use $crate::wasm::env_def::ConvertibleToWasm; r.to_typed_value() })) + }); + ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) => $body:tt ) => ({ + let body = $crate::wasm::env_def::macros::constrain_closure::<(), _>(|| { + unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) + }); + body().map_err(|reason| { + $ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; + return Ok(sp_sandbox::ReturnValue::Unit) + }) +} + +macro_rules! define_func { + ( $trait:tt $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { + fn $name< E: $trait >( + $ctx: &mut $crate::wasm::Runtime, + args: &[sp_sandbox::Value], + ) -> Result + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { + #[allow(unused)] + let mut args = args.iter(); + + unmarshall_then_body_then_marshall!( + args, + $ctx, + ( $( $names : $params ),* ) $( -> $returns )* => $body + ) + } + }; +} + +macro_rules! register_body { + ( $reg_cb:ident, $trait:tt; + $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt + ) => { + $reg_cb( + stringify!($module).as_bytes(), + stringify!($name).as_bytes(), + { + define_func!( + $trait $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body + ); + $name:: + } + ); + } +} + +macro_rules! 
register_func { + ( $reg_cb:ident, $trait:tt; ) => {}; + + ( $reg_cb:ident, $trait:tt; + __unstable__ $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt $($rest:tt)* + ) => { + #[cfg(feature = "unstable-interface")] + register_body!( + $reg_cb, $trait; + __unstable__ $name + ( $ctx $( , $names : $params )* ) + $( -> $returns )* => $body + ); + register_func!( $reg_cb, $trait; $($rest)* ); + }; + + ( $reg_cb:ident, $trait:tt; + $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt $($rest:tt)* + ) => { + register_body!( + $reg_cb, $trait; + $module $name + ( $ctx $( , $names : $params )* ) + $( -> $returns )* => $body + ); + register_func!( $reg_cb, $trait; $($rest)* ); + }; +} + +/// Define a function set that can be imported by executing wasm code. +/// +/// **NB**: Be advised that all functions defined by this macro +/// will panic if called with unexpected arguments. +/// +/// It's up to the user of this macro to check signatures of wasm code to be executed +/// and reject the code if any imported function has a mismatched signature. +macro_rules! 
define_env { + ( $init_name:ident , < E: $trait:tt > , + $( [$module:ident] $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt , )* + ) => { + pub struct $init_name; + + impl $crate::wasm::env_def::ImportSatisfyCheck for $init_name { + fn can_satisfy( + module: &[u8], + name: &[u8], + func_type: &wasm_instrument::parity_wasm::elements::FunctionType, + ) -> bool + { + #[cfg(not(feature = "unstable-interface"))] + if module == b"__unstable__" { + return false; + } + gen_signature_dispatch!( + module, name, func_type ; + $( $module, $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* + ); + + return false; + } + } + + impl $crate::wasm::env_def::FunctionImplProvider for $init_name + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { + fn impls)>(f: &mut F) { + register_func!( + f, + $trait; + $( $module $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* + ); + } + } + }; +} + +#[cfg(test)] +mod tests { + use crate::{ + exec::Ext, + wasm::{runtime::TrapReason, tests::MockExt, Runtime}, + Weight, + }; + use sp_runtime::traits::Zero; + use sp_sandbox::{ReturnValue, Value}; + use wasm_instrument::parity_wasm::elements::{FunctionType, ValueType}; + + struct TestRuntime { + value: u32, + } + + impl TestRuntime { + fn set_trap_reason(&mut self, _reason: TrapReason) {} + } + + #[test] + fn macro_unmarshall_then_body_then_marshall_value_or_trap() { + fn test_value( + _ctx: &mut TestRuntime, + args: &[sp_sandbox::Value], + ) -> Result { + let mut args = args.iter(); + unmarshall_then_body_then_marshall!( + args, + _ctx, + (a: u32, b: u32) -> u32 => { + if b == 0 { + Err(crate::wasm::runtime::TrapReason::Termination) + } else { + Ok(a / b) + } + } + ) + } + + let ctx = &mut TestRuntime { value: 0 }; + assert_eq!( + test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), + ReturnValue::Value(Value::I32(5)), + ); + assert!(test_value(ctx, &[Value::I32(15), 
Value::I32(0)]).is_err()); + } + + #[test] + fn macro_unmarshall_then_body_then_marshall_unit() { + fn test_unit( + ctx: &mut TestRuntime, + args: &[sp_sandbox::Value], + ) -> Result { + let mut args = args.iter(); + unmarshall_then_body_then_marshall!( + args, + ctx, + (a: u32, b: u32) => { + ctx.value = a + b; + Ok(()) + } + ) + } + + let ctx = &mut TestRuntime { value: 0 }; + let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); + assert_eq!(result, ReturnValue::Unit); + assert_eq!(ctx.value, 5); + } + + #[test] + fn macro_define_func() { + define_func!( Ext seal_gas (_ctx, amount: u32) => { + let amount = Weight::from(amount); + if !amount.is_zero() { + Ok(()) + } else { + Err(TrapReason::Termination) + } + }); + let _f: fn( + &mut Runtime, + &[sp_sandbox::Value], + ) -> Result = seal_gas::; + } + + #[test] + fn macro_gen_signature() { + assert_eq!(gen_signature!((i32)), FunctionType::new(vec![ValueType::I32], vec![])); + + assert_eq!( + gen_signature!( (i32, u32) -> u32 ), + FunctionType::new(vec![ValueType::I32, ValueType::I32], vec![ValueType::I32]), + ); + } + + #[test] + fn macro_unmarshall_then_body() { + let args = vec![Value::I32(5), Value::I32(3)]; + let mut args = args.iter(); + + let ctx: &mut u32 = &mut 0; + + let r = unmarshall_then_body!( + { + *ctx = a + b; + a * b + }, + ctx, + args, + a: u32, + b: u32 + ); + + assert_eq!(*ctx, 8); + assert_eq!(r, 15); + } + + #[test] + fn macro_define_env() { + use crate::wasm::env_def::ImportSatisfyCheck; + + define_env!(Env, , + [seal0] seal_gas( _ctx, amount: u32 ) => { + let amount = Weight::from(amount); + if !amount.is_zero() { + Ok(()) + } else { + Err(crate::wasm::runtime::TrapReason::Termination) + } + }, + ); + + assert!(Env::can_satisfy( + b"seal0", + b"seal_gas", + &FunctionType::new(vec![ValueType::I32], vec![]) + )); + assert!(!Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![]))); + } +} diff --git a/frame/contracts/src/wasm/env_def/mod.rs 
b/frame/contracts/src/wasm/env_def/mod.rs index be6e688c97868..b4c5ffe81e7c1 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -21,6 +21,9 @@ use crate::exec::Ext; use sp_sandbox::Value; use wasm_instrument::parity_wasm::elements::{FunctionType, ValueType}; +#[macro_use] +pub mod macros; + pub trait ConvertibleToWasm: Sized { const VALUE_TYPE: ValueType; type NativeType; @@ -28,8 +31,8 @@ pub trait ConvertibleToWasm: Sized { fn from_typed_value(_: Value) -> Option; } impl ConvertibleToWasm for i32 { - const VALUE_TYPE: ValueType = ValueType::I32; type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; fn to_typed_value(self) -> Value { Value::I32(self) } @@ -38,8 +41,8 @@ impl ConvertibleToWasm for i32 { } } impl ConvertibleToWasm for u32 { - const VALUE_TYPE: ValueType = ValueType::I32; type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; fn to_typed_value(self) -> Value { Value::I32(self as i32) } @@ -51,8 +54,8 @@ impl ConvertibleToWasm for u32 { } } impl ConvertibleToWasm for u64 { - const VALUE_TYPE: ValueType = ValueType::I64; type NativeType = u64; + const VALUE_TYPE: ValueType = ValueType::I64; fn to_typed_value(self) -> Value { Value::I64(self as i64) } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 9a094ad4f7da0..3dd5da187b258 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -35,9 +35,12 @@ use crate::{ Schedule, }; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::dispatch::{DispatchError, DispatchResult}; +use frame_support::{ + dispatch::{DispatchError, DispatchResult}, + ensure, + traits::Get, +}; use sp_core::crypto::UncheckedFrom; -use sp_runtime::RuntimeDebug; use sp_sandbox::{SandboxEnvironmentBuilder, SandboxInstance, SandboxMemory}; use sp_std::prelude::*; #[cfg(test)] @@ -67,10 +70,6 @@ pub struct PrefabWasmModule { maximum: u32, /// Code instrumented with the latest 
schedule. code: RelaxedCodeVec, - /// A code that might contain non deterministic features and is therefore never allowed - /// to be run on chain. Specifically this code can never be instantiated into a contract - /// and can just be used through a delegate call. - determinism: Determinism, /// The uninstrumented, pristine version of the code. /// /// It is not stored because the pristine code has its own storage item. The value @@ -107,27 +106,6 @@ pub struct OwnerInfo { refcount: u64, } -/// Defines the required determinism level of a wasm blob when either running or uploading code. -#[derive( - Clone, Copy, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen, RuntimeDebug, PartialEq, Eq, -)] -pub enum Determinism { - /// The execution should be deterministic and hence no indeterministic instructions are - /// allowed. - /// - /// Dispatchables always use this mode in order to make on-chain execution deterministic. - Deterministic, - /// Allow calling or uploading an indeterministic code. - /// - /// This is only possible when calling into `pallet-contracts` directly via - /// [`crate::Pallet::bare_call`]. - /// - /// # Note - /// - /// **Never** use this mode for on-chain execution. - AllowIndeterminism, -} - impl ExportedFunction { /// The wasm export name for the function. fn identifier(&self) -> &str { @@ -150,14 +128,18 @@ where original_code: Vec, schedule: &Schedule, owner: AccountIdOf, - determinism: Determinism, ) -> Result { let module = prepare::prepare_contract( original_code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, schedule, owner, - determinism, )?; + // When instrumenting a new code we apply a stricter limit than enforced by the + // `RelaxedCodeVec` in order to leave some headroom for reinstrumentation. 
+ ensure!( + module.code.len() as u32 <= T::MaxCodeLen::get(), + (>::CodeTooLarge.into(), ""), + ); Ok(module) } @@ -286,10 +268,6 @@ where fn code_len(&self) -> u32 { self.code.len() as u32 } - - fn is_deterministic(&self) -> bool { - matches!(self.determinism, Determinism::Deterministic) - } } #[cfg(test)] @@ -302,18 +280,15 @@ mod tests { }, gas::GasMeter, storage::WriteOutcome, - tests::{RuntimeCall, Test, ALICE, BOB}, + tests::{Call, Test, ALICE, BOB}, BalanceOf, CodeHash, Error, Pallet as Contracts, }; use assert_matches::assert_matches; - use frame_support::{ - assert_ok, - dispatch::DispatchResultWithPostInfo, - weights::{OldWeight, Weight}, - }; + use frame_support::{assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight}; + use hex_literal::hex; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; - use sp_core::H256; + use sp_core::{Bytes, H256}; use sp_runtime::DispatchError; use std::{ borrow::BorrowMut, @@ -364,7 +339,7 @@ mod tests { transfers: Vec, // (topics, data) events: Vec<(Vec, Vec)>, - runtime_calls: RefCell>, + runtime_calls: RefCell>, schedule: Schedule, gas_meter: GasMeter, debug_buffer: Vec, @@ -373,8 +348,8 @@ mod tests { } /// The call is mocked and just returns this hardcoded value. 
- fn call_return_data() -> Vec { - vec![0xDE, 0xAD, 0xBE, 0xEF] + fn call_return_data() -> Bytes { + Bytes(vec![0xDE, 0xAD, 0xBE, 0xEF]) } impl Default for MockExt { @@ -390,7 +365,7 @@ mod tests { events: Default::default(), runtime_calls: Default::default(), schedule: Default::default(), - gas_meter: GasMeter::new(Weight::from_ref_time(10_000_000_000)), + gas_meter: GasMeter::new(10_000_000_000), debug_buffer: Default::default(), ecdsa_recover: Default::default(), } @@ -428,15 +403,15 @@ mod tests { salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { - code_hash, + code_hash: code_hash.clone(), value, data: data.to_vec(), - gas_left: gas_limit.ref_time(), + gas_left: gas_limit, salt: salt.to_vec(), }); Ok(( Contracts::::contract_address(&ALICE, &code_hash, salt), - ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, )) } fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { @@ -545,7 +520,7 @@ mod tests { 16_384 } fn get_weight_price(&self, weight: Weight) -> BalanceOf { - BalanceOf::::from(1312_u32).saturating_mul(weight.ref_time().into()) + BalanceOf::::from(1312_u32).saturating_mul(weight.into()) } fn schedule(&self) -> &Schedule { &self.schedule @@ -557,10 +532,7 @@ mod tests { self.debug_buffer.extend(msg.as_bytes()); true } - fn call_runtime( - &self, - call: ::RuntimeCall, - ) -> DispatchResultWithPostInfo { + fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo { self.runtime_calls.borrow_mut().push(call); Ok(Default::default()) } @@ -569,7 +541,7 @@ mod tests { signature: &[u8; 65], message_hash: &[u8; 32], ) -> Result<[u8; 33], ()> { - self.ecdsa_recover.borrow_mut().push((*signature, *message_hash)); + self.ecdsa_recover.borrow_mut().push((signature.clone(), message_hash.clone())); Ok([3; 33]) } fn contract_info(&mut self) -> &mut crate::ContractInfo { @@ -583,13 +555,8 
@@ mod tests { fn execute>(wat: &str, input_data: Vec, mut ext: E) -> ExecResult { let wasm = wat::parse_str(wat).unwrap(); let schedule = crate::Schedule::default(); - let executable = PrefabWasmModule::<::T>::from_code( - wasm, - &schedule, - ALICE, - Determinism::Deterministic, - ) - .unwrap(); + let executable = + PrefabWasmModule::<::T>::from_code(wasm, &schedule, ALICE).unwrap(); executable.execute(ext.borrow_mut(), &ExportedFunction::Call, input_data) } @@ -841,7 +808,7 @@ mod tests { let mut mock_ext = MockExt::default(); let input = vec![0xff, 0x2a, 0x99, 0x88]; let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); - assert_eq!(result.data, input); + assert_eq!(result.data.0, input); assert_eq!( &mock_ext.calls, &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: true }] @@ -896,12 +863,73 @@ mod tests { } #[test] + #[cfg(not(feature = "unstable-interface"))] + fn contains_storage_works() { + const CODE: &str = r#" +(module + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_contains_storage" (func $seal_contains_storage (param i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 4) size of input buffer (32 bytes as we copy the key here) + (data (i32.const 0) "\20") + + ;; [4, 36) input buffer + ;; [36, inf) output buffer + + (func (export "call") + ;; Receive key + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + + ;; Load the return value into the output buffer + (i32.store (i32.const 36) + (call $seal_contains_storage + (i32.const 4) ;; The pointer to the storage key to fetch + ) + ) + + ;; Return the contents of the buffer + (call $seal_return + (i32.const 0) ;; flags + (i32.const 36) ;; output buffer ptr + (i32.const 4) ;; result is integer (4 bytes) + ) + ) + + (func (export "deploy")) +) +"#; + + let mut ext = MockExt::default(); 
+ + ext.storage.insert(vec![1u8; 32], vec![42u8]); + ext.storage.insert(vec![2u8; 32], vec![]); + + // value does not exist -> sentinel value returned + let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + + // value did exist -> success + let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1,); + + // value did exist -> success (zero sized type) + let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0,); + } + + #[test] + #[cfg(feature = "unstable-interface")] fn contains_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal1" "contains_storage" (func $contains_storage (param i32 i32) (result i32))) + (import "__unstable__" "seal_contains_storage" (func $seal_contains_storage (param i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) @@ -919,7 +947,7 @@ mod tests { ) ;; Call seal_clear_storage and save what it returns at 0 (i32.store (i32.const 0) - (call $contains_storage + (call $seal_contains_storage (i32.const 8) ;; key_ptr (i32.load (i32.const 4)) ;; key_len ) @@ -953,13 +981,13 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); // value exists let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); // 
getter does not remove the value from storage assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); @@ -967,7 +995,7 @@ mod tests { let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[2u8; 19].to_vec()).unwrap(), &([] as [u8; 0])); } @@ -1210,7 +1238,7 @@ mod tests { let output = execute(CODE_ECDSA_TO_ETH_ADDRESS, vec![], MockExt::default()).unwrap(); assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: [0x02; 20].to_vec() } + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x02; 20].to_vec()) } ); } @@ -1287,7 +1315,7 @@ mod tests { assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() } + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x22; 32].to_vec()) } ); } @@ -1525,11 +1553,10 @@ mod tests { let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); - let OldWeight(gas_left) = OldWeight::decode(&mut &*output.data).unwrap(); + let gas_left = Weight::decode(&mut &*output.data).unwrap(); let actual_left = ext.gas_meter.gas_left(); - // TODO: account for proof size weight - assert!(gas_left < gas_limit.ref_time(), "gas_left must be less than initial"); - assert!(gas_left > actual_left.ref_time(), "gas_left must be greater than final"); + assert!(gas_left < gas_limit, "gas_left must be less than initial"); + assert!(gas_left > actual_left, "gas_left must be greater than final"); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -1606,7 +1633,10 @@ mod tests { fn return_from_start_fn() { let output = execute(CODE_RETURN_FROM_START_FN, vec![], MockExt::default()).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); + assert_eq!( + 
output, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) } + ); } const CODE_TIMESTAMP_NOW: &str = r#" @@ -1648,53 +1678,11 @@ mod tests { ) (func (export "deploy")) ) -"#; - - const CODE_TIMESTAMP_NOW_UNPREFIXED: &str = r#" -(module - (import "seal0" "now" (func $now (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; size of our buffer is 32 bytes - (data (i32.const 32) "\20") - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "call") - ;; This stores the block timestamp in the buffer - (call $now (i32.const 0) (i32.const 32)) - - ;; assert len == 8 - (call $assert - (i32.eq - (i32.load (i32.const 32)) - (i32.const 8) - ) - ) - - ;; assert that contents of the buffer is equal to the i64 value of 1111. - (call $assert - (i64.eq - (i64.load (i32.const 0)) - (i64.const 1111) - ) - ) - ) - (func (export "deploy")) -) "#; #[test] fn now() { assert_ok!(execute(CODE_TIMESTAMP_NOW, vec![], MockExt::default())); - assert_ok!(execute(CODE_TIMESTAMP_NOW_UNPREFIXED, vec![], MockExt::default())); } const CODE_MINIMUM_BALANCE: &str = r#" @@ -1805,9 +1793,10 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: array_bytes::hex_into_unchecked( - "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" - ) + data: Bytes( + hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F") + .to_vec() + ), }, ); } @@ -1875,13 +1864,13 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: ( - array_bytes::hex2array_unchecked::<32>( - "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" - ), - 42u64, - ) - .encode() + data: Bytes( + ( + hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F"), + 42u64, + ) + .encode() + ), }, ); } @@ -1922,7 +1911,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left().ref_time() > 0); + assert!(mock_ext.gas_meter.gas_left() > 0); } const 
CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" @@ -2086,7 +2075,7 @@ mod tests { fn seal_return_with_success_status() { let output = execute( CODE_RETURN_WITH_DATA, - array_bytes::hex2bytes_unchecked("00000000445566778899"), + hex!("00000000445566778899").to_vec(), MockExt::default(), ) .unwrap(); @@ -2095,7 +2084,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: array_bytes::hex2bytes_unchecked("445566778899"), + data: Bytes(hex!("445566778899").to_vec()), } ); assert!(!output.did_revert()); @@ -2103,18 +2092,15 @@ mod tests { #[test] fn return_with_revert_status() { - let output = execute( - CODE_RETURN_WITH_DATA, - array_bytes::hex2bytes_unchecked("010000005566778899"), - MockExt::default(), - ) - .unwrap(); + let output = + execute(CODE_RETURN_WITH_DATA, hex!("010000005566778899").to_vec(), MockExt::default()) + .unwrap(); assert_eq!( output, ExecReturnValue { flags: ReturnFlags::REVERT, - data: array_bytes::hex2bytes_unchecked("5566778899"), + data: Bytes(hex!("5566778899").to_vec()), } ); assert!(output.did_revert()); @@ -2235,7 +2221,7 @@ mod tests { #[cfg(feature = "unstable-interface")] const CODE_CALL_RUNTIME: &str = r#" (module - (import "__unstable__" "call_runtime" (func $call_runtime (param i32 i32) (result i32))) + (import "__unstable__" "seal_call_runtime" (func $seal_call_runtime (param i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -2252,7 +2238,7 @@ mod tests { ) ;; Just use the call passed as input and store result to memory (i32.store (i32.const 0) - (call $call_runtime + (call $seal_call_runtime (i32.const 4) ;; Pointer where the call is stored (i32.load (i32.const 0)) ;; Size of the call ) @@ -2271,13 +2257,12 @@ mod tests { #[test] #[cfg(feature = "unstable-interface")] fn call_runtime_works() { - let call = - RuntimeCall::System(frame_system::Call::remark { remark: 
b"Hello World".to_vec() }); + let call = Call::System(frame_system::Call::remark { remark: b"Hello World".to_vec() }); let mut ext = MockExt::default(); let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); assert_eq!(*ext.runtime_calls.borrow(), vec![call]); // 0 = ReturnCode::Success - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); } #[test] @@ -2296,12 +2281,76 @@ mod tests { } #[test] + #[cfg(not(feature = "unstable-interface"))] + fn set_storage_works() { + const CODE: &str = r#" +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal1" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func (export "call") + ;; Receive (key ++ value_to_write) + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + ;; Store the passed value to the passed key and store result to memory + (i32.store (i32.const 0) + (call $seal_set_storage + (i32.const 4) ;; key_ptr + (i32.const 36) ;; value_ptr + (i32.sub ;; value_len (input_size - key_size) + (i32.load (i32.const 0)) + (i32.const 32) + ) + ) + ) + (call $seal_return + (i32.const 0) ;; flags + (i32.const 0) ;; pointer to returned value + (i32.const 4) ;; length of returned value + ) + ) + + (func (export "deploy")) +) +"#; + + let mut ext = MockExt::default(); + + // value did not exist before -> sentinel returned + let input = ([1u8; 32], [42u8, 48]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); + + // value 
do exist -> length of old value returned + let input = ([1u8; 32], [0u8; 0]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); + + // value do exist -> length of old value returned (test for zero sized val) + let input = ([1u8; 32], [99u8]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); + } + + #[test] + #[cfg(feature = "unstable-interface")] fn set_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "seal2" "set_storage" (func $set_storage (param i32 i32 i32 i32) (result i32))) + (import "__unstable__" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer @@ -2318,7 +2367,7 @@ mod tests { ) ;; Store the passed value to the passed key and store result to memory (i32.store (i32.const 168) - (call $set_storage + (call $seal_set_storage (i32.const 8) ;; key_ptr (i32.load (i32.const 4)) ;; key_len (i32.add ;; value_ptr = 8 + key_len @@ -2349,29 +2398,30 @@ mod tests { // value did not exist before -> sentinel returned let input = (32, [1u8; 32], [42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = (32, [1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - 
assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = (32, [1u8; 32], [99u8]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } #[test] + #[cfg(feature = "unstable-interface")] fn get_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "seal1" "get_storage" (func $get_storage (param i32 i32 i32 i32) (result i32))) + (import "__unstable__" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) @@ -2392,7 +2442,7 @@ mod tests { ) ;; Load a storage value and result of this call into the output buffer (i32.store (i32.const 168) - (call $get_storage + (call $seal_get_storage (i32.const 12) ;; key_ptr (i32.load (i32.const 8)) ;; key_len (i32.const 172) ;; Pointer to the output buffer @@ -2433,7 +2483,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2441,30 +2491,31 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), 
ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); - assert_eq!(&result.data[4..], &[42u8]); + assert_eq!(&result.data.0[4..], &[42u8]); // value exists (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![])); - assert_eq!(&result.data[4..], &([] as [u8; 0])); + assert_eq!(&result.data.0[4..], &([] as [u8; 0])); } #[test] + #[cfg(feature = "unstable-interface")] fn clear_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "seal1" "clear_storage" (func $clear_storage (param i32 i32) (result i32))) + (import "__unstable__" "seal_clear_storage" (func $seal_clear_storage (param i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; size of input buffer @@ -2481,7 +2532,7 @@ mod tests { ) ;; Call seal_clear_storage and save what it returns at 0 (i32.store (i32.const 0) - (call $clear_storage + (call $seal_clear_storage (i32.const 8) ;; key_ptr (i32.load (i32.const 4)) ;; key_len ) @@ -2516,14 +2567,14 @@ mod tests { let input = (32, [3u8; 32]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[3u8; 32].to_vec()), None); // value did exist let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); + 
assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); // value cleared assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); @@ -2531,14 +2582,14 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); // value exists let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned (test for 0 sized) - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); // value cleared assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); } @@ -2550,7 +2601,7 @@ mod tests { (module (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "__unstable__" "take_storage" (func $take_storage (param i32 i32 i32 i32) (result i32))) + (import "__unstable__" "seal_take_storage" (func $seal_take_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) @@ -2572,7 +2623,7 @@ mod tests { ;; Load a storage value and result of this call into the output buffer (i32.store (i32.const 168) - (call $take_storage + (call $seal_take_storage (i32.const 12) ;; key_ptr (i32.load (i32.const 8)) ;; key_len (i32.const 172) ;; Pointer to the output buffer @@ -2615,7 +2666,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2623,21 +2674,21 @@ mod tests { 
let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); - assert_eq!(&result.data[4..], &[42u8]); + assert_eq!(&result.data.0[4..], &[42u8]); // value did exist -> length returned (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); - assert_eq!(&result.data[4..], &[0u8; 0]); + assert_eq!(&result.data.0[4..], &[0u8; 0]); } #[test] @@ -2674,7 +2725,10 @@ mod tests { let output = execute(CODE_IS_CONTRACT, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 1u32 (`true`). 
- assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 1u32.encode() },); + assert_eq!( + output, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(1u32.encode()) }, + ); } #[test] @@ -2808,7 +2862,10 @@ mod tests { let output = execute(CODE_CALLER_IS_ORIGIN, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 0u32 (`false`) - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 0u32.encode() },); + assert_eq!( + output, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(0u32.encode()) }, + ); } #[test] diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 3e6b9eee96680..f6fff20de6b1a 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -22,7 +22,7 @@ use crate::{ chain_extension::ChainExtension, storage::meter::Diff, - wasm::{env_def::ImportSatisfyCheck, Determinism, OwnerInfo, PrefabWasmModule}, + wasm::{env_def::ImportSatisfyCheck, OwnerInfo, PrefabWasmModule}, AccountIdOf, CodeVec, Config, Error, Schedule, }; use codec::{Encode, MaxEncodedLen}; @@ -54,7 +54,7 @@ impl<'a, T: Config> ContractModule<'a, T> { elements::deserialize_buffer(original_code).map_err(|_| "Can't decode wasm code")?; // Make sure that the module is valid. - validate_module::(&module, ()).map_err(|_| "Module is not valid")?; + validate_module::(&module).map_err(|_| "Module is not valid")?; // Return a `ContractModule` instance with // __valid__ module. 
@@ -182,8 +182,8 @@ impl<'a, T: Config> ContractModule<'a, T> { Ok(()) } - fn inject_gas_metering(self, determinism: Determinism) -> Result { - let gas_rules = self.schedule.rules(&self.module, determinism); + fn inject_gas_metering(self) -> Result { + let gas_rules = self.schedule.rules(&self.module); let contract_module = wasm_instrument::gas_metering::inject(self.module, &gas_rules, "seal0") .map_err(|_| "gas instrumentation failed")?; @@ -369,7 +369,6 @@ fn get_memory_limits( fn check_and_instrument( original_code: &[u8], schedule: &Schedule, - determinism: Determinism, ) -> Result<(Vec, (u32, u32)), &'static str> { let result = (|| { let contract_module = ContractModule::new(original_code, schedule)?; @@ -377,20 +376,17 @@ fn check_and_instrument( contract_module.ensure_no_internal_memory()?; contract_module.ensure_table_size_limit(schedule.limits.table_size)?; contract_module.ensure_global_variable_limit(schedule.limits.globals)?; + contract_module.ensure_no_floating_types()?; contract_module.ensure_parameter_limit(schedule.limits.parameters)?; contract_module.ensure_br_table_size_limit(schedule.limits.br_table_size)?; - if matches!(determinism, Determinism::Deterministic) { - contract_module.ensure_no_floating_types()?; - } - // We disallow importing `gas` function here since it is treated as implementation detail. let disallowed_imports = [b"gas".as_ref()]; let memory_limits = get_memory_limits(contract_module.scan_imports::(&disallowed_imports)?, schedule)?; let code = contract_module - .inject_gas_metering(determinism)? + .inject_gas_metering()? .inject_stack_height_metering()? 
.into_wasm_code()?; @@ -408,11 +404,9 @@ fn do_preparation( original_code: CodeVec, schedule: &Schedule, owner: AccountIdOf, - determinism: Determinism, ) -> Result, (DispatchError, &'static str)> { - let (code, (initial, maximum)) = - check_and_instrument::(original_code.as_ref(), schedule, determinism) - .map_err(|msg| (>::CodeRejected.into(), msg))?; + let (code, (initial, maximum)) = check_and_instrument::(original_code.as_ref(), schedule) + .map_err(|msg| (>::CodeRejected.into(), msg))?; let original_code_len = original_code.len(); let mut module = PrefabWasmModule { @@ -420,7 +414,6 @@ fn do_preparation( initial, maximum, code: code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, - determinism, code_hash: T::Hashing::hash(&original_code), original_code: Some(original_code), owner_info: None, @@ -433,7 +426,7 @@ fn do_preparation( .saturating_add(original_code_len) .saturating_add(>::max_encoded_len()) as u32; let deposit = Diff { bytes_added, items_added: 3, ..Default::default() } - .update_contract::(None) + .to_deposit::() .charge_or_zero(); module.owner_info = Some(OwnerInfo { owner, deposit, refcount: 0 }); @@ -456,9 +449,8 @@ pub fn prepare_contract( original_code: CodeVec, schedule: &Schedule, owner: AccountIdOf, - determinism: Determinism, ) -> Result, (DispatchError, &'static str)> { - do_preparation::(original_code, schedule, owner, determinism) + do_preparation::(original_code, schedule, owner) } /// The same as [`prepare_contract`] but without constructing a new [`PrefabWasmModule`] @@ -469,9 +461,8 @@ pub fn prepare_contract( pub fn reinstrument_contract( original_code: &[u8], schedule: &Schedule, - determinism: Determinism, ) -> Result, &'static str> { - Ok(check_and_instrument::(original_code, schedule, determinism)?.0) + Ok(check_and_instrument::(original_code, schedule)?.0) } /// Alternate (possibly unsafe) preparation functions used only for benchmarking. 
@@ -504,7 +495,6 @@ pub mod benchmarking { maximum: memory_limits.1, code_hash: T::Hashing::hash(&original_code), original_code: Some(original_code.try_into().map_err(|_| "Original code too large")?), - determinism: Determinism::Deterministic, code: contract_module .into_wasm_code()? .try_into() @@ -527,7 +517,6 @@ mod tests { schedule::Limits, tests::{Test, ALICE}, }; - use pallet_contracts_proc_macro::define_env; use std::fmt; impl fmt::Debug for PrefabWasmModule { @@ -543,27 +532,17 @@ mod tests { // Define test environment for tests. We need ImportSatisfyCheck // implementation from it. So actual implementations doesn't matter. - #[define_env] - pub mod test_env { - fn panic(_ctx: crate::wasm::Runtime) -> Result<(), TrapReason> { - Ok(()) - } + define_env!(Test, , + [seal0] panic(_ctx) => { unreachable!(); }, // gas is an implementation defined function and a contract can't import it. - fn gas(_ctx: crate::wasm::Runtime, _amount: u32) -> Result<(), TrapReason> { - Ok(()) - } + [seal0] gas(_ctx, _amount: u32) => { unreachable!(); }, - fn nop(_ctx: crate::wasm::Runtime, _unused: u64) -> Result<(), TrapReason> { - Ok(()) - } + [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, // new version of nop with other data type for argumebt - #[version(1)] - fn nop(_ctx: crate::wasm::Runtime, _unused: i32) -> Result<(), TrapReason> { - Ok(()) - } - } + [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, + ); } macro_rules! prepare_test { @@ -582,7 +561,7 @@ mod tests { }, .. 
Default::default() }; - let r = do_preparation::(wasm, &schedule, ALICE, Determinism::Deterministic); + let r = do_preparation::(wasm, &schedule, ALICE); assert_matches::assert_matches!(r.map_err(|(_, msg)| msg), $($expected)*); } }; diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 50947962c0631..3d5e62f2c1333 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -26,20 +26,16 @@ use crate::{ }; use bitflags::bitflags; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeAll, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; -use pallet_contracts_proc_macro::define_env; -use sp_core::crypto::UncheckedFrom; +use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, Zero}; use sp_sandbox::SandboxMemory; use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::ValueType; -/// The maximum nesting depth a contract can use when encoding types. -const MAX_DECODE_NESTING: u32 = 256; - /// Type of a storage key. #[allow(dead_code)] enum KeyType { @@ -164,7 +160,7 @@ impl> From for TrapReason { pub enum RuntimeCosts { /// Charge the gas meter with the cost of a metering block. The charged costs are /// the supplied cost of the block plus the overhead of the metering itself. - MeteringBlock(u64), + MeteringBlock(u32), /// Weight charged for copying data from the sandbox. CopyFromContract(u32), /// Weight charged for copying data to the sandbox. 
@@ -261,7 +257,7 @@ impl RuntimeCosts { { use self::RuntimeCosts::*; let weight = match *self { - MeteringBlock(amount) => s.gas.saturating_add(amount), + MeteringBlock(amount) => s.gas.saturating_add(amount.into()), CopyFromContract(len) => s.return_per_byte.saturating_mul(len.into()), CopyToContract(len) => s.input_per_byte.saturating_mul(len.into()), Caller => s.caller, @@ -327,14 +323,14 @@ impl RuntimeCosts { EcdsaRecovery => s.ecdsa_recover, ChainExtension(amount) => amount, #[cfg(feature = "unstable-interface")] - CallRuntime(weight) => weight.ref_time(), + CallRuntime(weight) => weight, SetCodeHash => s.set_code_hash, EcdsaToEthAddress => s.ecdsa_to_eth_address, }; RuntimeToken { #[cfg(test)] _created_from: *self, - weight: Weight::from_ref_time(weight), + weight, } } } @@ -483,10 +479,10 @@ where TrapReason::Return(ReturnData { flags, data }) => { let flags = ReturnFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?; - Ok(ExecReturnValue { flags, data }) + Ok(ExecReturnValue { flags, data: Bytes(data) }) }, TrapReason::Termination => - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), TrapReason::SupervisorError(error) => return Err(error.into()), } } @@ -494,7 +490,7 @@ where // Check the exact type of the error. match sandbox_result { // No traps were generated. Proceed normally. - Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), + Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). // This shouldn't happen because validation process ought to reject such binaries. 
@@ -579,7 +575,7 @@ where ptr: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) + let decoded = D::decode_all(&mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } @@ -601,7 +597,7 @@ where len: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, len)?; - let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) + let decoded = D::decode_all(&mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } @@ -857,7 +853,7 @@ where self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; } self.ext.call( - Weight::from_ref_time(gas), + gas, callee, value, input_data, @@ -879,7 +875,7 @@ where if let Ok(return_value) = call_outcome { return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), - data: return_value.data, + data: return_value.data.0, })) } } @@ -906,7 +902,6 @@ where salt_ptr: u32, salt_len: u32, ) -> Result { - let gas = Weight::from_ref_time(gas); self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; if value > 0u32.into() { @@ -949,237 +944,183 @@ where // Any input that leads to a out of bound error (reading or writing) or failing to decode // data passed to the supervisor will lead to a trap. This is not documented explicitly // for every function. -#[define_env] -pub mod env { - /// Account for used gas. Traps if gas used is greater than gas limit. - /// - /// NOTE: This is a implementation defined call and is NOT a part of the public API. - /// This call is supposed to be called only by instrumentation injected code. - /// - /// - amount: How much gas is used. - fn gas(ctx: Runtime, amount: u64) -> Result<(), TrapReason> { +define_env!(Env, , + // Account for used gas. Traps if gas used is greater than gas limit. 
+ // + // NOTE: This is a implementation defined call and is NOT a part of the public API. + // This call is supposed to be called only by instrumentation injected code. + // + // - amount: How much gas is used. + [seal0] gas(ctx, amount: u32) => { ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; Ok(()) - } + }, - /// Set the value at the given key in the contract storage. - /// - /// Equivalent to the newer version of `seal_set_storage` with the exception of the return - /// type. Still a valid thing to call when not interested in the return value. - #[prefixed_alias] - fn set_storage( - ctx: Runtime, - key_ptr: u32, - value_ptr: u32, - value_len: u32, - ) -> Result<(), TrapReason> { + // Set the value at the given key in the contract storage. + // + // Equivalent to the newer version of `seal_set_storage` with the exception of the return + // type. Still a valid thing to call when not interested in the return value. + [seal0] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) - } + }, - /// Set the value at the given key in the contract storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// The value length must not exceed the maximum defined by the contracts module parameters. - /// Specifying a `value_len` of zero will store an empty value. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - /// - `value_ptr`: pointer into the linear memory where the value to set is placed. - /// - `value_len`: the length of the value in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. 
- #[version(1)] - #[prefixed_alias] - fn set_storage( - ctx: Runtime, - key_ptr: u32, - value_ptr: u32, - value_len: u32, - ) -> Result { + // Set the value at the given key in the contract storage. + // + // This version is to be used with a fixed sized storage key. For runtimes supporting transparent + // hashing, please use the newer version of this function. + // + // The value length must not exceed the maximum defined by the contracts module parameters. + // Specifying a `value_len` of zero will store an empty value. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + // - `value_ptr`: pointer into the linear memory where the value to set is placed. + // - `value_len`: the length of the value in bytes. + // + // # Return Value + // + // Returns the size of the pre-existing value at the specified key if any. Otherwise + // `SENTINEL` is returned as a sentinel value. + [seal1] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) -> u32 => { ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len) - } + }, - /// Set the value at the given key in the contract storage. - /// - /// The key and value lengths must not exceed the maximums defined by the contracts module - /// parameters. Specifying a `value_len` of zero will store an empty value. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `value_ptr`: pointer into the linear memory where the value to set is placed. - /// - `value_len`: the length of the value in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. 
- #[version(2)] - #[prefixed_alias] - fn set_storage( - ctx: Runtime, - key_ptr: u32, - key_len: u32, - value_ptr: u32, - value_len: u32, - ) -> Result { + // Set the value at the given key in the contract storage. + // + // The key and value lengths must not exceed the maximums defined by the contracts module parameters. + // Specifying a `value_len` of zero will store an empty value. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + // - `key_len`: the length of the key in bytes. + // - `value_ptr`: pointer into the linear memory where the value to set is placed. + // - `value_len`: the length of the value in bytes. + // + // # Return Value + // + // Returns the size of the pre-existing value at the specified key if any. Otherwise + // `SENTINEL` is returned as a sentinel value. + [__unstable__] seal_set_storage(ctx, key_ptr: u32, key_len: u32, value_ptr: u32, value_len: u32) -> u32 => { ctx.set_storage(KeyType::Variable(key_len), key_ptr, value_ptr, value_len) - } + }, - /// Clear the value at the given key in the contract storage. - /// - /// Equivalent to the newer version of `seal_clear_storage` with the exception of the return - /// type. Still a valid thing to call when not interested in the return value. - #[prefixed_alias] - fn clear_storage(ctx: Runtime, key_ptr: u32) -> Result<(), TrapReason> { + // Clear the value at the given key in the contract storage. + // + // Equivalent to the newer version of `seal_clear_storage` with the exception of the return + // type. Still a valid thing to call when not interested in the return value. + [seal0] seal_clear_storage(ctx, key_ptr: u32) => { ctx.clear_storage(KeyType::Fix, key_ptr).map(|_| ()) - } + }, - /// Clear the value at the given key in the contract storage. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key is placed. - /// - `key_len`: the length of the key in bytes. 
- /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. - #[version(1)] - #[prefixed_alias] - fn clear_storage(ctx: Runtime, key_ptr: u32, key_len: u32) -> Result { + // Clear the value at the given key in the contract storage. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the key is placed. + // - `key_len`: the length of the key in bytes. + // + // # Return Value + // + // Returns the size of the pre-existing value at the specified key if any. Otherwise + // `SENTINEL` is returned as a sentinel value. + [__unstable__] seal_clear_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { ctx.clear_storage(KeyType::Variable(key_len), key_ptr) - } + }, - /// Retrieve the value under the given key from storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// `ReturnCode::KeyNotFound` - #[prefixed_alias] - fn get_storage( - ctx: Runtime, - key_ptr: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { + // Retrieve the value under the given key from storage. + // + // This version is to be used with a fixed sized storage key. For runtimes supporting transparent + // hashing, please use the newer version of this function. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + // - `out_ptr`: pointer to the linear memory where the value is written to. 
+ // - `out_len_ptr`: in-out pointer into linear memory where the buffer length + // is read from and the value length is written to. + // + // # Errors + // + // `ReturnCode::KeyNotFound` + [seal0] seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { ctx.get_storage(KeyType::Fix, key_ptr, out_ptr, out_len_ptr) - } + }, - /// Retrieve the value under the given key from storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// `ReturnCode::KeyNotFound` - #[version(1)] - #[prefixed_alias] - fn get_storage( - ctx: Runtime, - key_ptr: u32, - key_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { + // Retrieve the value under the given key from storage. + // + // This version is to be used with a fixed sized storage key. For runtimes supporting transparent + // hashing, please use the newer version of this function. + // + // The key length must not exceed the maximum defined by the contracts module parameter. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + // - `key_len`: the length of the key in bytes. + // - `out_ptr`: pointer to the linear memory where the value is written to. + // - `out_len_ptr`: in-out pointer into linear memory where the buffer length + // is read from and the value length is written to. 
+ // + // # Errors + // + // `ReturnCode::KeyNotFound` + [__unstable__] seal_get_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { ctx.get_storage(KeyType::Variable(key_len), key_ptr, out_ptr, out_len_ptr) - } + }, - /// Checks whether there is a value stored under the given key. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. - #[prefixed_alias] - fn contains_storage(ctx: Runtime, key_ptr: u32) -> Result { + // Checks whether there is a value stored under the given key. + // + // This version is to be used with a fixed sized storage key. For runtimes supporting transparent + // hashing, please use the newer version of this function. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + // + // # Return Value + // + // Returns the size of the pre-existing value at the specified key if any. Otherwise + // `SENTINEL` is returned as a sentinel value. + [seal0] seal_contains_storage(ctx, key_ptr: u32) -> u32 => { ctx.contains_storage(KeyType::Fix, key_ptr) - } + }, - /// Checks whether there is a value stored under the given key. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. 
Otherwise - /// `SENTINEL` is returned as a sentinel value. - #[version(1)] - #[prefixed_alias] - fn contains_storage(ctx: Runtime, key_ptr: u32, key_len: u32) -> Result { + // Checks whether there is a value stored under the given key. + // + // The key length must not exceed the maximum defined by the contracts module parameter. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + // - `key_len`: the length of the key in bytes. + // + // # Return Value + // + // Returns the size of the pre-existing value at the specified key if any. Otherwise + // `SENTINEL` is returned as a sentinel value. + [__unstable__] seal_contains_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { ctx.contains_storage(KeyType::Variable(key_len), key_ptr) - } + }, - /// Retrieve and remove the value under the given key from storage. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// `ReturnCode::KeyNotFound` - #[unstable] - #[prefixed_alias] - fn take_storage( - ctx: Runtime, - key_ptr: u32, - key_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { + // Retrieve and remove the value under the given key from storage. + // + // # Parameters + // + // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + // - `key_len`: the length of the key in bytes. + // - `out_ptr`: pointer to the linear memory where the value is written to. + // - `out_len_ptr`: in-out pointer into linear memory where the buffer length + // is read from and the value length is written to. 
+ // + // # Errors + // + // `ReturnCode::KeyNotFound` + [__unstable__] seal_take_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?; let key = ctx.read_sandbox_memory(key_ptr, key_len)?; - if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent( - &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, - None, - true, - )? { + if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent(&VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, None, true)? { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(value.len() as u32)); ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, already_charged)?; Ok(ReturnCode::Success) @@ -1187,58 +1128,59 @@ pub mod env { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(0)); Ok(ReturnCode::KeyNotFound) } - } - /// Transfer some value to another account. - /// - /// # Parameters - /// - /// - account_ptr: a pointer to the address of the beneficiary account Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - account_len: length of the address buffer. - /// - value_ptr: a pointer to the buffer with value, how much value to send. Should be decodable - /// as a `T::Balance`. Traps otherwise. - /// - value_len: length of the value buffer. - /// - /// # Errors - /// - /// `ReturnCode::TransferFailed` - #[prefixed_alias] - fn transfer( - ctx: Runtime, + }, + + // Transfer some value to another account. + // + // # Parameters + // + // - account_ptr: a pointer to the address of the beneficiary account + // Should be decodable as an `T::AccountId`. Traps otherwise. + // - account_len: length of the address buffer. + // - value_ptr: a pointer to the buffer with value, how much value to send. + // Should be decodable as a `T::Balance`. Traps otherwise. + // - value_len: length of the value buffer. 
+ // + // # Errors + // + // `ReturnCode::TransferFailed` + [seal0] seal_transfer( + ctx, account_ptr: u32, _account_len: u32, value_ptr: u32, - _value_len: u32, - ) -> Result { + _value_len: u32 + ) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::Transfer)?; let callee: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; - let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr)?; + let value: BalanceOf<::T> = + ctx.read_sandbox_memory_as(value_ptr)?; + let result = ctx.ext.transfer(&callee, value); match result { Ok(()) => Ok(ReturnCode::Success), Err(err) => { let code = Runtime::::err_into_return_code(err)?; Ok(code) - }, + } } - } + }, - /// Make a call to another contract. - /// - /// # Deprecation - /// - /// This is equivalent to calling the newer version of this function with - /// `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. - /// - /// # Note - /// - /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes - /// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards - /// compatibility. Consider switching to the newest version of this function. - #[prefixed_alias] - fn call( - ctx: Runtime, + // Make a call to another contract. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function with + // `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. + // + // # Note + // + // The values `_callee_len` and `_value_len` are ignored because the encoded sizes + // of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards + // compatibility. Consider switching to the newest version of this function. 
+ [seal0] seal_call( + ctx, callee_ptr: u32, _callee_len: u32, gas: u64, @@ -1247,51 +1189,49 @@ pub mod env { input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32, - ) -> Result { + output_len_ptr: u32 + ) -> ReturnCode => { ctx.call( CallFlags::ALLOW_REENTRY, - CallType::Call { callee_ptr, value_ptr, gas }, + CallType::Call{callee_ptr, value_ptr, gas}, input_data_ptr, input_data_len, output_ptr, output_len_ptr, ) - } + }, - /// Make a call to another contract. - /// - /// The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. - /// The copy of the output buffer can be skipped by supplying the sentinel value - /// of `SENTINEL` to `output_ptr`. - /// - /// # Parameters - /// - /// - flags: See [`CallFlags`] for a documenation of the supported flags. - /// - callee_ptr: a pointer to the address of the callee contract. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - gas: how much gas to devote to the execution. - /// - value_ptr: a pointer to the buffer with value, how much value to send. Should be decodable - /// as a `T::Balance`. Traps otherwise. - /// - input_data_ptr: a pointer to a buffer to be used as input data to the callee. - /// - input_data_len: length of the input data buffer. - /// - output_ptr: a pointer where the output buffer is copied to. - /// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - /// # Errors - /// - /// An error means that the call wasn't successful output buffer is returned unless - /// stated otherwise. - /// - /// `ReturnCode::CalleeReverted`: Output buffer is returned. - /// `ReturnCode::CalleeTrapped` - /// `ReturnCode::TransferFailed` - /// `ReturnCode::NotCallable` - #[version(1)] - #[prefixed_alias] - fn call( - ctx: Runtime, + // Make a call to another contract. + // + // The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. 
+ // The copy of the output buffer can be skipped by supplying the sentinel value + // of `SENTINEL` to `output_ptr`. + // + // # Parameters + // + // - flags: See [`CallFlags`] for a documenation of the supported flags. + // - callee_ptr: a pointer to the address of the callee contract. + // Should be decodable as an `T::AccountId`. Traps otherwise. + // - gas: how much gas to devote to the execution. + // - value_ptr: a pointer to the buffer with value, how much value to send. + // Should be decodable as a `T::Balance`. Traps otherwise. + // - input_data_ptr: a pointer to a buffer to be used as input data to the callee. + // - input_data_len: length of the input data buffer. + // - output_ptr: a pointer where the output buffer is copied to. + // - output_len_ptr: in-out pointer to where the length of the buffer is read from + // and the actual length is written to. + // + // # Errors + // + // An error means that the call wasn't successful output buffer is returned unless + // stated otherwise. + // + // `ReturnCode::CalleeReverted`: Output buffer is returned. + // `ReturnCode::CalleeTrapped` + // `ReturnCode::TransferFailed` + // `ReturnCode::NotCallable` + [seal1] seal_call( + ctx, flags: u32, callee_ptr: u32, gas: u64, @@ -1299,76 +1239,75 @@ pub mod env { input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32, - ) -> Result { + output_len_ptr: u32 + ) -> ReturnCode => { ctx.call( CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::Call { callee_ptr, value_ptr, gas }, + CallType::Call{callee_ptr, value_ptr, gas}, input_data_ptr, input_data_len, output_ptr, output_len_ptr, ) - } + }, - /// Execute code in the context (storage, caller, value) of the current contract. - /// - /// Reentrancy protection is always disabled since the callee is allowed - /// to modify the callers storage. This makes going through a reentrancy attack - /// unnecessary for the callee when it wants to exploit the caller. 
- /// - /// # Parameters - /// - /// - flags: See [`CallFlags`] for a documentation of the supported flags. - /// - code_hash: a pointer to the hash of the code to be called. - /// - input_data_ptr: a pointer to a buffer to be used as input data to the callee. - /// - input_data_len: length of the input data buffer. - /// - output_ptr: a pointer where the output buffer is copied to. - /// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - /// # Errors - /// - /// An error means that the call wasn't successful and no output buffer is returned unless - /// stated otherwise. - /// - /// `ReturnCode::CalleeReverted`: Output buffer is returned. - /// `ReturnCode::CalleeTrapped` - /// `ReturnCode::CodeNotFound` - #[prefixed_alias] - fn delegate_call( - ctx: Runtime, + // Execute code in the context (storage, caller, value) of the current contract. + // + // Reentrancy protection is always disabled since the callee is allowed + // to modify the callers storage. This makes going through a reentrancy attack + // unnecessary for the callee when it wants to exploit the caller. + // + // # Parameters + // + // - flags: See [`CallFlags`] for a documentation of the supported flags. + // - code_hash: a pointer to the hash of the code to be called. + // - input_data_ptr: a pointer to a buffer to be used as input data to the callee. + // - input_data_len: length of the input data buffer. + // - output_ptr: a pointer where the output buffer is copied to. + // - output_len_ptr: in-out pointer to where the length of the buffer is read from + // and the actual length is written to. + // + // # Errors + // + // An error means that the call wasn't successful and no output buffer is returned unless + // stated otherwise. + // + // `ReturnCode::CalleeReverted`: Output buffer is returned. 
+ // `ReturnCode::CalleeTrapped` + // `ReturnCode::CodeNotFound` + [seal0] seal_delegate_call( + ctx, flags: u32, code_hash_ptr: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32, - ) -> Result { + output_len_ptr: u32 + ) -> ReturnCode => { ctx.call( CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::DelegateCall { code_hash_ptr }, + CallType::DelegateCall{code_hash_ptr}, input_data_ptr, input_data_len, output_ptr, output_len_ptr, ) - } - /// Instantiate a contract with the specified code hash. - /// - /// # Deprecation - /// - /// This is equivalent to calling the newer version of this function. The newer version - /// drops the now unnecessary length fields. - /// - /// # Note - /// - /// The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes - /// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards - /// compatibility. Consider switching to the newest version of this function. - #[prefixed_alias] - fn instantiate( - ctx: Runtime, + }, + + // Instantiate a contract with the specified code hash. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. + // + // # Note + // + // The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes + // of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards + // compatibility. Consider switching to the newest version of this function. 
+ [seal0] seal_instantiate( + ctx, code_hash_ptr: u32, _code_hash_len: u32, gas: u64, @@ -1381,9 +1320,9 @@ pub mod env { output_ptr: u32, output_len_ptr: u32, salt_ptr: u32, - salt_len: u32, - ) -> Result { - ctx.instantiate( + salt_len: u32 + ) -> ReturnCode => { + ctx.instantiate ( code_hash_ptr, gas, value_ptr, @@ -1396,52 +1335,50 @@ pub mod env { salt_ptr, salt_len, ) - } + }, - /// Instantiate a contract with the specified code hash. - /// - /// This function creates an account and executes the constructor defined in the code specified - /// by the code hash. The address of this new account is copied to `address_ptr` and its length - /// to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its - /// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by - /// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. - /// - /// `value` must be at least the minimum balance. Otherwise the instantiation fails and the - /// contract is not created. - /// - /// # Parameters - /// - /// - code_hash_ptr: a pointer to the buffer that contains the initializer code. - /// - gas: how much gas to devote to the execution of the initializer code. - /// - value_ptr: a pointer to the buffer with value, how much value to send. Should be decodable - /// as a `T::Balance`. Traps otherwise. - /// - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code. - /// - input_data_len: length of the input data buffer. - /// - address_ptr: a pointer where the new account's address is copied to. - /// - address_len_ptr: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - output_ptr: a pointer where the output buffer is copied to. - /// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. 
- /// - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`. - /// - salt_len: length in bytes of the supplied salt. - /// - /// # Errors - /// - /// Please consult the `ReturnCode` enum declaration for more information on those - /// errors. Here we only note things specific to this function. - /// - /// An error means that the account wasn't created and no address or output buffer - /// is returned unless stated otherwise. - /// - /// `ReturnCode::CalleeReverted`: Output buffer is returned. - /// `ReturnCode::CalleeTrapped` - /// `ReturnCode::TransferFailed` - /// `ReturnCode::CodeNotFound` - #[version(1)] - #[prefixed_alias] - fn instantiate( - ctx: Runtime, + // Instantiate a contract with the specified code hash. + // + // This function creates an account and executes the constructor defined in the code specified + // by the code hash. The address of this new account is copied to `address_ptr` and its length + // to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its + // length to `output_len_ptr`. The copy of the output buffer and address can be skipped by + // supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. + // + // `value` must be at least the minimum balance. Otherwise the instantiation fails and the + // contract is not created. + // + // # Parameters + // + // - code_hash_ptr: a pointer to the buffer that contains the initializer code. + // - gas: how much gas to devote to the execution of the initializer code. + // - value_ptr: a pointer to the buffer with value, how much value to send. + // Should be decodable as a `T::Balance`. Traps otherwise. + // - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code. + // - input_data_len: length of the input data buffer. + // - address_ptr: a pointer where the new account's address is copied to. 
+ // - address_len_ptr: in-out pointer to where the length of the buffer is read from + // and the actual length is written to. + // - output_ptr: a pointer where the output buffer is copied to. + // - output_len_ptr: in-out pointer to where the length of the buffer is read from + // and the actual length is written to. + // - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`. + // - salt_len: length in bytes of the supplied salt. + // + // # Errors + // + // Please consult the `ReturnCode` enum declaration for more information on those + // errors. Here we only note things specific to this function. + // + // An error means that the account wasn't created and no address or output buffer + // is returned unless stated otherwise. + // + // `ReturnCode::CalleeReverted`: Output buffer is returned. + // `ReturnCode::CalleeTrapped` + // `ReturnCode::TransferFailed` + // `ReturnCode::CodeNotFound` + [seal1] seal_instantiate( + ctx, code_hash_ptr: u32, gas: u64, value_ptr: u32, @@ -1452,8 +1389,8 @@ pub mod env { output_ptr: u32, output_len_ptr: u32, salt_ptr: u32, - salt_len: u32, - ) -> Result { + salt_len: u32 + ) -> ReturnCode => { ctx.instantiate( code_hash_ptr, gas, @@ -1467,62 +1404,54 @@ pub mod env { salt_ptr, salt_len, ) - } + }, - /// Remove the calling account and transfer remaining balance. - /// - /// # Deprecation - /// - /// This is equivalent to calling the newer version of this function. The newer version - /// drops the now unnecessary length fields. - /// - /// # Note - /// - /// The value `_beneficiary_len` is ignored because the encoded sizes - /// this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards - /// compatibility. Consider switching to the newest version of this function. - #[prefixed_alias] - fn terminate( - ctx: Runtime, - beneficiary_ptr: u32, - _beneficiary_len: u32, - ) -> Result<(), TrapReason> { + // Remove the calling account and transfer remaining balance. 
+ // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. + // + // # Note + // + // The value `_beneficiary_len` is ignored because the encoded sizes + // this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards + // compatibility. Consider switching to the newest version of this function. + [seal0] seal_terminate(ctx, beneficiary_ptr: u32, _beneficiary_len: u32) => { ctx.terminate(beneficiary_ptr) - } + }, - /// Remove the calling account and transfer remaining **free** balance. - /// - /// This function never returns. Either the termination was successful and the - /// execution of the destroyed contract is halted. Or it failed during the termination - /// which is considered fatal and results in a trap + rollback. - /// - /// - beneficiary_ptr: a pointer to the address of the beneficiary account where all where all - /// remaining funds of the caller are transferred. Should be decodable as an `T::AccountId`. - /// Traps otherwise. - /// - /// # Traps - /// - /// - The contract is live i.e is already on the call stack. - /// - Failed to send the balance to the beneficiary. - /// - The deletion queue is full. - #[version(1)] - #[prefixed_alias] - fn terminate(ctx: Runtime, beneficiary_ptr: u32) -> Result<(), TrapReason> { + // Remove the calling account and transfer remaining **free** balance. + // + // This function never returns. Either the termination was successful and the + // execution of the destroyed contract is halted. Or it failed during the termination + // which is considered fatal and results in a trap + rollback. + // + // - beneficiary_ptr: a pointer to the address of the beneficiary account where all + // where all remaining funds of the caller are transferred. + // Should be decodable as an `T::AccountId`. Traps otherwise. + // + // # Traps + // + // - The contract is live i.e is already on the call stack. 
+ // - Failed to send the balance to the beneficiary. + // - The deletion queue is full. + [seal1] seal_terminate(ctx, beneficiary_ptr: u32) => { ctx.terminate(beneficiary_ptr) - } + }, - /// Stores the input passed by the caller into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Note - /// - /// This function traps if the input was previously forwarded by a `seal_call`. - #[prefixed_alias] - fn input(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the input passed by the caller into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // # Note + // + // This function traps if the input was previously forwarded by a `seal_call`. + [seal0] seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::InputBase)?; if let Some(input) = ctx.input_data.take() { ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { @@ -1533,399 +1462,300 @@ pub mod env { } else { Err(Error::::InputForwarded.into()) } - } + }, - /// Cease contract execution and save a data buffer as a result of the execution. - /// - /// This function never returns as it stops execution of the caller. - /// This is the only way to return a data buffer to the caller. 
Returning from - /// execution without calling this function is equivalent to calling: - /// ``` - /// seal_return(0, 0, 0); - /// ``` - /// - /// The flags argument is a bitfield that can be used to signal special return - /// conditions to the supervisor: - /// --- lsb --- - /// bit 0 : REVERT - Revert all storage changes made by the caller. - /// bit [1, 31]: Reserved for future use. - /// --- msb --- - /// - /// Using a reserved bit triggers a trap. - fn seal_return( - ctx: Runtime, - flags: u32, - data_ptr: u32, - data_len: u32, - ) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::Return(data_len))?; - Err(TrapReason::Return(ReturnData { + // Cease contract execution and save a data buffer as a result of the execution. + // + // This function never returns as it stops execution of the caller. + // This is the only way to return a data buffer to the caller. Returning from + // execution without calling this function is equivalent to calling: + // ``` + // seal_return(0, 0, 0); + // ``` + // + // The flags argument is a bitfield that can be used to signal special return + // conditions to the supervisor: + // --- lsb --- + // bit 0 : REVERT - Revert all storage changes made by the caller. + // bit [1, 31]: Reserved for future use. + // --- msb --- + // + // Using a reserved bit triggers a trap. + [seal0] seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { + ctx.charge_gas(RuntimeCosts::Return(data_len))?; + Err(TrapReason::Return(ReturnData { flags, data: ctx.read_sandbox_memory(data_ptr, data_len)?, })) - } + }, - /// Stores the address of the caller into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. 
- /// - /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the - /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then - /// the address of the contract will be returned. The value is encoded as T::AccountId. - #[prefixed_alias] - fn caller(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the address of the caller into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the + // extrinsic will be returned. Otherwise, if this call is initiated by another contract then the + // address of the contract will be returned. The value is encoded as T::AccountId. + [seal0] seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Caller)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.caller().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged )?) - } + }, - /// Checks whether a specified address belongs to a contract. - /// - /// # Parameters - /// - /// - account_ptr: a pointer to the address of the beneficiary account Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - /// Returned value is a u32-encoded boolean: (0 = false, 1 = true). - #[prefixed_alias] - fn is_contract(ctx: Runtime, account_ptr: u32) -> Result { + // Checks whether a specified address belongs to a contract. + // + // # Parameters + // + // - account_ptr: a pointer to the address of the beneficiary account + // Should be decodable as an `T::AccountId`. Traps otherwise. 
+ // + // Returned value is a u32-encoded boolean: (0 = false, 1 = true). + [seal0] seal_is_contract(ctx, account_ptr: u32) -> u32 => { ctx.charge_gas(RuntimeCosts::IsContract)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; Ok(ctx.ext.is_contract(&address) as u32) - } + }, - /// Retrieve the code hash for a specified contract address. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address in question. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// `ReturnCode::KeyNotFound` - #[prefixed_alias] - fn code_hash( - ctx: Runtime, - account_ptr: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { + // Retrieve the code hash for a specified contract address. + // + // # Parameters + // + // - `account_ptr`: a pointer to the address in question. + // Should be decodable as an `T::AccountId`. Traps otherwise. + // - `out_ptr`: pointer to the linear memory where the returning value is written to. + // - `out_len_ptr`: in-out pointer into linear memory where the buffer length + // is read from and the value length is written to. 
+ // + // # Errors + // + // `ReturnCode::KeyNotFound` + [seal0] seal_code_hash(ctx, account_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::CodeHash)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; if let Some(value) = ctx.ext.code_hash(&address) { - ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &value.encode(), - false, - already_charged, - )?; + ctx.write_sandbox_output(out_ptr, out_len_ptr, &value.encode(), false, already_charged)?; Ok(ReturnCode::Success) } else { Ok(ReturnCode::KeyNotFound) } - } + }, - /// Retrieve the code hash of the currently executing contract. - /// - /// # Parameters - /// - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - #[prefixed_alias] - fn own_code_hash(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Retrieve the code hash of the currently executing contract. + // + // # Parameters + // + // - `out_ptr`: pointer to the linear memory where the returning value is written to. + // - `out_len_ptr`: in-out pointer into linear memory where the buffer length + // is read from and the value length is written to. + [seal0] seal_own_code_hash(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::OwnCodeHash)?; let code_hash_encoded = &ctx.ext.own_code_hash().encode(); - Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - code_hash_encoded, - false, - already_charged, - )?) - } + Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, code_hash_encoded, false, already_charged)?) + }, - /// Checks whether the caller of the current contract is the origin of the whole call stack. - /// - /// Prefer this over `seal_is_contract` when checking whether your contract is being called by a - /// contract or a plain account. 
The reason is that it performs better since it does not need to - /// do any storage lookups. - /// - /// A return value of`true` indicates that this contract is being called by a plain account - /// and `false` indicates that the caller is another contract. - /// - /// Returned value is a u32-encoded boolean: (0 = false, 1 = true). - #[prefixed_alias] - fn caller_is_origin(ctx: Runtime) -> Result { + // Checks whether the caller of the current contract is the origin of the whole call stack. + // + // Prefer this over `seal_is_contract` when checking whether your contract is being called by a contract + // or a plain account. The reason is that it performs better since it does not need to + // do any storage lookups. + // + // A return value of`true` indicates that this contract is being called by a plain account + // and `false` indicates that the caller is another contract. + // + // Returned value is a u32-encoded boolean: (0 = false, 1 = true). + [seal0] seal_caller_is_origin(ctx) -> u32 => { ctx.charge_gas(RuntimeCosts::CallerIsOrigin)?; Ok(ctx.ext.caller_is_origin() as u32) - } + }, - /// Stores the address of the current contract into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - #[prefixed_alias] - fn address(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the address of the current contract into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. 
If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + [seal0] seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Address)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.address().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged )?) - } + }, - /// Stores the price for the specified amount of gas into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as T::Balance. - /// - /// # Note - /// - /// It is recommended to avoid specifying very small values for `gas` as the prices for a single - /// gas can be smaller than one. - #[prefixed_alias] - fn weight_to_fee( - ctx: Runtime, - gas: u64, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { - let gas = Weight::from_ref_time(gas); + // Stores the price for the specified amount of gas into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as T::Balance. + // + // # Note + // + // It is recommended to avoid specifying very small values for `gas` as the prices for a single + // gas can be smaller than one. 
+ [seal0] seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::WeightToFee)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.get_weight_price(gas).encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged )?) - } + }, - /// Stores the amount of gas left into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as Gas. - #[prefixed_alias] - fn gas_left(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the amount of gas left into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as Gas. + [seal0] seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::GasLeft)?; - let gas_left = &ctx.ext.gas_meter().gas_left().ref_time().encode(); - Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, gas_left, false, already_charged)?) - } + let gas_left = &ctx.ext.gas_meter().gas_left().encode(); + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, gas_left, false, already_charged, + )?) + }, - /// Stores the **free* balance of the current account into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. 
- /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as T::Balance. - #[prefixed_alias] - fn balance(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the **free* balance of the current account into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as T::Balance. + [seal0] seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Balance)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.balance().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged )?) - } + }, - /// Stores the value transferred along with this call/instantiate into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as T::Balance. - #[prefixed_alias] - fn value_transferred( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + // Stores the value transferred along with this call/instantiate into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. 
+ // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as T::Balance. + [seal0] seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::ValueTransferred)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.value_transferred().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged )?) - } + }, - /// Stores a random number for the current block and the given subject into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as T::Hash. - /// - /// # Deprecation - /// - /// This function is deprecated. Users should migrate to the version in the "seal1" module. - #[prefixed_alias] - fn random( - ctx: Runtime, - subject_ptr: u32, - subject_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + // Stores a random number for the current block and the given subject into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as T::Hash. + // + // # Deprecation + // + // This function is deprecated. 
Users should migrate to the version in the "seal1" module. + [seal0] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()) + return Err(Error::::RandomSubjectTooLong.into()); } let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.random(&subject_buf).0.encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).0.encode(), false, already_charged )?) - } + }, - /// Stores a random number for the current block and the given subject into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as (T::Hash, T::BlockNumber). - /// - /// # Changes from v0 - /// - /// In addition to the seed it returns the block number since which it was determinable - /// by chain observers. - /// - /// # Note - /// - /// The returned seed should only be used to distinguish commitments made before - /// the returned block number. If the block number is too early (i.e. commitments were - /// made afterwards), then ensure no further commitments may be made and repeatedly - /// call this on later blocks until the block number returned is later than the latest - /// commitment. - #[version(1)] - #[prefixed_alias] - fn random( - ctx: Runtime, - subject_ptr: u32, - subject_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + // Stores a random number for the current block and the given subject into the supplied buffer. 
+ // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as (T::Hash, T::BlockNumber). + // + // # Changes from v0 + // + // In addition to the seed it returns the block number since which it was determinable + // by chain observers. + // + // # Note + // + // The returned seed should only be used to distinguish commitments made before + // the returned block number. If the block number is too early (i.e. commitments were + // made afterwards), then ensure no further commitments may be made and repeatedly + // call this on later blocks until the block number returned is later than the latest + // commitment. + [seal1] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()) + return Err(Error::::RandomSubjectTooLong.into()); } let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.random(&subject_buf).encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, already_charged )?) - } + }, - /// Load the latest block timestamp into the supplied buffer - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. 
- #[prefixed_alias] - fn now(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Load the latest block timestamp into the supplied buffer + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + [seal0] seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Now)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.now().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged )?) - } + }, - /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// - /// The data is encoded as T::Balance. - #[prefixed_alias] - fn minimum_balance(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + // + // The data is encoded as T::Balance. + [seal0] seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::MinimumBalance)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.minimum_balance().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged )?) - } + }, - /// Stores the tombstone deposit into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Deprecation - /// - /// There is no longer a tombstone deposit. 
This function always returns 0. - #[prefixed_alias] - fn tombstone_deposit( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + // Stores the tombstone deposit into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // # Deprecation + // + // There is no longer a tombstone deposit. This function always returns 0. + [seal0] seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Balance)?; let deposit = >::zero().encode(); Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, &deposit, false, already_charged)?) - } + }, - /// Was used to restore the given destination contract sacrificing the caller. - /// - /// # Note - /// - /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity - #[prefixed_alias] - fn restore_to( - ctx: Runtime, + // Was used to restore the given destination contract sacrificing the caller. + // + // # Note + // + // The state rent functionality was removed. This is stub only exists for + // backwards compatiblity + [seal0] seal_restore_to( + ctx, _dest_ptr: u32, _dest_len: u32, _code_hash_ptr: u32, @@ -1933,80 +1763,86 @@ pub mod env { _rent_allowance_ptr: u32, _rent_allowance_len: u32, _delta_ptr: u32, - _delta_count: u32, - ) -> Result<(), TrapReason> { + _delta_count: u32 + ) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - } + }, - /// Was used to restore the given destination contract sacrificing the caller. - /// - /// # Note - /// - /// The state rent functionality was removed. 
This is stub only exists for - /// backwards compatiblity - #[version(1)] - #[prefixed_alias] - fn restore_to( - ctx: Runtime, + // Was used to restore the given destination contract sacrificing the caller. + // + // # Note + // + // The state rent functionality was removed. This is stub only exists for + // backwards compatiblity + [seal1] seal_restore_to( + ctx, _dest_ptr: u32, _code_hash_ptr: u32, _rent_allowance_ptr: u32, _delta_ptr: u32, - _delta_count: u32, - ) -> Result<(), TrapReason> { + _delta_count: u32 + ) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - } + }, - /// Deposit a contract event with the data buffer and optional list of topics. There is a limit - /// on the maximum number of topics specified by `event_topics`. - /// - /// - topics_ptr - a pointer to the buffer of topics encoded as `Vec`. The value of - /// this is ignored if `topics_len` is set to 0. The topics list can't contain duplicates. - /// - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. - /// - data_ptr - a pointer to a raw data buffer which will saved along the event. - /// - data_len - the length of the data buffer. - #[prefixed_alias] - fn deposit_event( - ctx: Runtime, + // Deposit a contract event with the data buffer and optional list of topics. There is a limit + // on the maximum number of topics specified by `event_topics`. + // + // - topics_ptr - a pointer to the buffer of topics encoded as `Vec`. The value of this + // is ignored if `topics_len` is set to 0. The topics list can't contain duplicates. + // - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. + // - data_ptr - a pointer to a raw data buffer which will saved along the event. + // - data_len - the length of the data buffer. 
+ [seal0] seal_deposit_event( + ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, - data_len: u32, - ) -> Result<(), TrapReason> { + data_len: u32 + ) => { fn has_duplicates(items: &mut Vec) -> bool { - items.sort(); + // # Warning + // + // Unstable sorts are non-deterministic across architectures. The usage here is OK + // because we are rejecting duplicates which removes the non determinism. + items.sort_unstable(); // Find any two consecutive equal elements. - items.windows(2).any(|w| match &w { - &[a, b] => a == b, - _ => false, + items.windows(2).any(|w| { + match &w { + &[a, b] => a == b, + _ => false, + } }) } let num_topic = topics_len .checked_div(sp_std::mem::size_of::>() as u32) .ok_or("Zero sized topics are not allowed")?; - ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; + ctx.charge_gas(RuntimeCosts::DepositEvent { + num_topic, + len: data_len, + })?; if data_len > ctx.ext.max_value_size() { - return Err(Error::::ValueTooLarge.into()) + return Err(Error::::ValueTooLarge.into()); } - let mut topics: Vec::T>> = match topics_len { + let mut topics: Vec::::T>> = match topics_len { 0 => Vec::new(), _ => ctx.read_sandbox_memory_as_unbounded(topics_ptr, topics_len)?, }; // If there are more than `event_topics`, then trap. if topics.len() > ctx.ext.schedule().limits.event_topics as usize { - return Err(Error::::TooManyTopics.into()) + return Err(Error::::TooManyTopics.into()); } // Check for duplicate topics. If there are any, then trap. // Complexity O(n * log(n)) and no additional allocations. // This also sorts the topics. if has_duplicates(&mut topics) { - return Err(Error::::DuplicateTopics.into()) + return Err(Error::::DuplicateTopics.into()); } let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?; @@ -2014,209 +1850,179 @@ pub mod env { ctx.ext.deposit_event(topics, event_data); Ok(()) - } + }, - /// Was used to set rent allowance of the contract. 
- /// - /// # Note - /// - /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity. - #[prefixed_alias] - fn set_rent_allowance( - ctx: Runtime, - _value_ptr: u32, - _value_len: u32, - ) -> Result<(), TrapReason> { + // Was used to set rent allowance of the contract. + // + // # Note + // + // The state rent functionality was removed. This is stub only exists for + // backwards compatiblity. + [seal0] seal_set_rent_allowance(ctx, _value_ptr: u32, _value_len: u32) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - } + }, - /// Was used to set rent allowance of the contract. - /// - /// # Note - /// - /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity. - #[version(1)] - #[prefixed_alias] - fn set_rent_allowance(ctx: Runtime, _value_ptr: u32) -> Result<(), TrapReason> { + // Was used to set rent allowance of the contract. + // + // # Note + // + // The state rent functionality was removed. This is stub only exists for + // backwards compatiblity. + [seal1] seal_set_rent_allowance(ctx, _value_ptr: u32) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - } + }, - /// Was used to store the rent allowance into the supplied buffer. - /// - /// # Note - /// - /// The state rent functionality was removed. This is stub only exists for - /// backwards compatiblity. - #[prefixed_alias] - fn rent_allowance(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Was used to store the rent allowance into the supplied buffer. + // + // # Note + // + // The state rent functionality was removed. This is stub only exists for + // backwards compatiblity. 
+ [seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Balance)?; let rent_allowance = >::max_value().encode(); Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &rent_allowance, - false, - already_charged, + out_ptr, out_len_ptr, &rent_allowance, false, already_charged )?) - } + }, - /// Stores the current block number of the current contract into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - #[prefixed_alias] - fn block_number(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + // Stores the current block number of the current contract into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + [seal0] seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::BlockNumber)?; Ok(ctx.write_sandbox_output( - out_ptr, - out_len_ptr, - &ctx.ext.block_number().encode(), - false, - already_charged, + out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged )?) - } + }, - /// Computes the SHA2 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). 
- /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - #[prefixed_alias] - fn hash_sha2_256( - ctx: Runtime, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { + // Computes the SHA2 256-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 32 bytes (256 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + [seal0] seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeCosts::HashSha256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr)?) - } + }, - /// Computes the KECCAK 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). 
- /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - #[prefixed_alias] - fn hash_keccak_256( - ctx: Runtime, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { + // Computes the KECCAK 256-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 32 bytes (256 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + [seal0] seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeCosts::HashKeccak256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr)?) - } + }, - /// Computes the BLAKE2 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). 
- /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - #[prefixed_alias] - fn hash_blake2_256( - ctx: Runtime, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { + // Computes the BLAKE2 256-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 32 bytes (256 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + [seal0] seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeCosts::HashBlake256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr)?) - } + }, - /// Computes the BLAKE2 128-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 16 bytes (128 bits). 
- /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - #[prefixed_alias] - fn hash_blake2_128( - ctx: Runtime, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { + // Computes the BLAKE2 128-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 16 bytes (128 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + [seal0] seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { ctx.charge_gas(RuntimeCosts::HashBlake128(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr)?) - } + }, - /// Call into the chain extension provided by the chain if any. - /// - /// Handling of the input values is up to the specific chain extension and so is the - /// return value. The extension can decide to use the inputs as primitive inputs or as - /// in/out arguments by interpreting them as pointers. 
Any caller of this function - /// must therefore coordinate with the chain that it targets. - /// - /// # Note - /// - /// If no chain extension exists the contract will trap with the `NoChainExtension` - /// module error. - #[prefixed_alias] - fn call_chain_extension( - ctx: Runtime, + // Call into the chain extension provided by the chain if any. + // + // Handling of the input values is up to the specific chain extension and so is the + // return value. The extension can decide to use the inputs as primitive inputs or as + // in/out arguments by interpreting them as pointers. Any caller of this function + // must therefore coordinate with the chain that it targets. + // + // # Note + // + // If no chain extension exists the contract will trap with the `NoChainExtension` + // module error. + [seal0] seal_call_chain_extension( + ctx, id: u32, input_ptr: u32, input_len: u32, output_ptr: u32, - output_len_ptr: u32, - ) -> Result { + output_len_ptr: u32 + ) -> u32 => { use crate::chain_extension::{ChainExtension, Environment, RetVal}; if !::ChainExtension::enabled() { - return Err(Error::::NoChainExtension.into()) + return Err(Error::::NoChainExtension.into()); } let mut chain_extension = ctx.chain_extension.take().expect( "Constructor initializes with `Some`. This is the only place where it is set to `None`.\ @@ -2225,95 +2031,87 @@ pub mod env { let env = Environment::new(ctx, id, input_ptr, input_len, output_ptr, output_len_ptr); let ret = match chain_extension.call(env)? { RetVal::Converging(val) => Ok(val), - RetVal::Diverging { flags, data } => - Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })), + RetVal::Diverging{flags, data} => Err(TrapReason::Return(ReturnData { + flags: flags.bits(), + data, + })), }; ctx.chain_extension = Some(chain_extension); ret - } + }, - /// Emit a custom debug message. - /// - /// No newlines are added to the supplied message. - /// Specifying invalid UTF-8 triggers a trap. 
- /// - /// This is a no-op if debug message recording is disabled which is always the case - /// when the code is executing on-chain. The message is interpreted as UTF-8 and - /// appended to the debug buffer which is then supplied to the calling RPC client. - /// - /// # Note - /// - /// Even though no action is taken when debug message recording is disabled there is still - /// a non trivial overhead (and weight cost) associated with calling this function. Contract - /// languages should remove calls to this function (either at runtime or compile time) when - /// not being executed as an RPC. For example, they could allow users to disable logging - /// through compile time flags (cargo features) for on-chain deployment. Additionally, the - /// return value of this function can be cached in order to prevent further calls at runtime. - #[prefixed_alias] - fn debug_message( - ctx: Runtime, - str_ptr: u32, - str_len: u32, - ) -> Result { + // Emit a custom debug message. + // + // No newlines are added to the supplied message. + // Specifying invalid UTF-8 triggers a trap. + // + // This is a no-op if debug message recording is disabled which is always the case + // when the code is executing on-chain. The message is interpreted as UTF-8 and + // appended to the debug buffer which is then supplied to the calling RPC client. + // + // # Note + // + // Even though no action is taken when debug message recording is disabled there is still + // a non trivial overhead (and weight cost) associated with calling this function. Contract + // languages should remove calls to this function (either at runtime or compile time) when + // not being executed as an RPC. For example, they could allow users to disable logging + // through compile time flags (cargo features) for on-chain deployment. Additionally, the + // return value of this function can be cached in order to prevent further calls at runtime. 
+ [seal0] seal_debug_message(ctx, str_ptr: u32, str_len: u32) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; if ctx.ext.append_debug_buffer("") { let data = ctx.read_sandbox_memory(str_ptr, str_len)?; - let msg = - core::str::from_utf8(&data).map_err(|_| >::DebugMessageInvalidUTF8)?; + let msg = core::str::from_utf8(&data) + .map_err(|_| >::DebugMessageInvalidUTF8)?; ctx.ext.append_debug_buffer(msg); - return Ok(ReturnCode::Success) + return Ok(ReturnCode::Success); } Ok(ReturnCode::LoggingDisabled) - } + }, - /// Call some dispatchable of the runtime. - /// - /// This function decodes the passed in data as the overarching `Call` type of the - /// runtime and dispatches it. The weight as specified in the runtime is charged - /// from the gas meter. Any weight refunds made by the dispatchable are considered. - /// - /// The filter specified by `Config::CallFilter` is attached to the origin of - /// the dispatched call. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - /// # Return Value - /// - /// Returns `ReturnCode::Success` when the dispatchable was succesfully executed and - /// returned `Ok`. When the dispatchable was exeuted but returned an error - /// `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not - /// provided because it is not guaranteed to be stable. - /// - /// # Comparison with `ChainExtension` - /// - /// Just as a chain extension this API allows the runtime to extend the functionality - /// of contracts. While making use of this function is generelly easier it cannot be - /// used in call cases. Consider writing a chain extension if you need to do perform - /// one of the following tasks: - /// - /// - Return data. - /// - Provide functionality **exclusively** to contracts. - /// - Provide custom weights. - /// - Avoid the need to keep the `Call` data structure stable. 
- /// - /// # Unstable - /// - /// This function is unstable and subject to change (or removal) in the future. Do not - /// deploy a contract using it to a production chain. - #[unstable] - #[prefixed_alias] - fn call_runtime( - ctx: Runtime, - call_ptr: u32, - call_len: u32, - ) -> Result { - use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo}; + // Call some dispatchable of the runtime. + // + // This function decodes the passed in data as the overarching `Call` type of the + // runtime and dispatches it. The weight as specified in the runtime is charged + // from the gas meter. Any weight refunds made by the dispatchable are considered. + // + // The filter specified by `Config::CallFilter` is attached to the origin of + // the dispatched call. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input data is placed. + // - `input_len`: the length of the input data in bytes. + // + // # Return Value + // + // Returns `ReturnCode::Success` when the dispatchable was succesfully executed and + // returned `Ok`. When the dispatchable was exeuted but returned an error + // `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not + // provided because it is not guaranteed to be stable. + // + // # Comparison with `ChainExtension` + // + // Just as a chain extension this API allows the runtime to extend the functionality + // of contracts. While making use of this function is generelly easier it cannot be + // used in call cases. Consider writing a chain extension if you need to do perform + // one of the following tasks: + // + // - Return data. + // - Provide functionality **exclusively** to contracts. + // - Provide custom weights. + // - Avoid the need to keep the `Call` data structure stable. + // + // # Unstable + // + // This function is unstable and subject to change (or removal) in the future. Do not + // deploy a contract using it to a production chain. 
+ [__unstable__] seal_call_runtime(ctx, call_ptr: u32, call_len: u32) -> ReturnCode => { + use frame_support::{dispatch::GetDispatchInfo, weights::extract_actual_weight}; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; - let call: ::RuntimeCall = - ctx.read_sandbox_memory_as_unbounded(call_ptr, call_len)?; + let call: ::Call = ctx.read_sandbox_memory_as_unbounded( + call_ptr, call_len + )?; let dispatch_info = call.get_dispatch_info(); let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; let result = ctx.ext.call_runtime(call); @@ -2323,32 +2121,27 @@ pub mod env { Ok(_) => Ok(ReturnCode::Success), Err(_) => Ok(ReturnCode::CallRuntimeReturnedError), } - } + }, - /// Recovers the ECDSA public key from the given message hash and signature. - /// - /// Writes the public key into the given output buffer. - /// Assumes the secp256k1 curve. - /// - /// # Parameters - /// - /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should - /// be decodable as a 65 bytes. Traps otherwise. - /// - `message_hash_ptr`: the pointer into the linear memory where the message hash is placed. - /// Should be decodable as a 32 bytes. Traps otherwise. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// buffer should be 33 bytes. The function will write the result directly into this buffer. - /// - /// # Errors - /// - /// `ReturnCode::EcdsaRecoverFailed` - #[prefixed_alias] - fn ecdsa_recover( - ctx: Runtime, - signature_ptr: u32, - message_hash_ptr: u32, - output_ptr: u32, - ) -> Result { + // Recovers the ECDSA public key from the given message hash and signature. + // + // Writes the public key into the given output buffer. + // Assumes the secp256k1 curve. + // + // # Parameters + // + // - `signature_ptr`: the pointer into the linear memory where the signature + // is placed. Should be decodable as a 65 bytes. Traps otherwise. 
+ // - `message_hash_ptr`: the pointer into the linear memory where the message + // hash is placed. Should be decodable as a 32 bytes. Traps otherwise. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The buffer should be 33 bytes. The function + // will write the result directly into this buffer. + // + // # Errors + // + // `ReturnCode::EcdsaRecoverFailed` + [seal0] seal_ecdsa_recover(ctx, signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::EcdsaRecovery)?; let mut signature: [u8; 65] = [0; 65]; @@ -2368,72 +2161,65 @@ pub mod env { }, Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), } - } + }, - /// Replace the contract code at the specified address with new code. - /// - /// # Note - /// - /// There are a couple of important considerations which must be taken into account when - /// using this API: - /// - /// 1. The storage at the code address will remain untouched. This means that contract - /// developers must ensure that the storage layout of the new code is compatible with that of - /// the old code. - /// - /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another - /// way, when using this API you lose the guarantee that an address always identifies a specific - /// code hash. - /// - /// 3. If a contract calls into itself after changing its code the new call would use - /// the new code. However, if the original caller panics after returning from the sub call it - /// would revert the changes made by `seal_set_code_hash` and the next caller would use - /// the old code. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the buffer that contains the new code hash. - /// - /// # Errors - /// - /// `ReturnCode::CodeNotFound` - #[prefixed_alias] - fn set_code_hash(ctx: Runtime, code_hash_ptr: u32) -> Result { + // Replace the contract code at the specified address with new code. 
+ // + // # Note + // + // There are a couple of important considerations which must be taken into account when + // using this API: + // + // 1. The storage at the code address will remain untouched. This means that contract developers + // must ensure that the storage layout of the new code is compatible with that of the old code. + // + // 2. Contracts using this API can't be assumed as having deterministic addresses. Said another way, + // when using this API you lose the guarantee that an address always identifies a specific code hash. + // + // 3. If a contract calls into itself after changing its code the new call would use + // the new code. However, if the original caller panics after returning from the sub call it + // would revert the changes made by `seal_set_code_hash` and the next caller would use + // the old code. + // + // # Parameters + // + // - `code_hash_ptr`: A pointer to the buffer that contains the new code hash. + // + // # Errors + // + // `ReturnCode::CodeNotFound` + [seal0] seal_set_code_hash(ctx, code_hash_ptr: u32) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::SetCodeHash)?; let code_hash: CodeHash<::T> = ctx.read_sandbox_memory_as(code_hash_ptr)?; match ctx.ext.set_code_hash(code_hash) { - Err(err) => { + Err(err) => { let code = Runtime::::err_into_return_code(err)?; Ok(code) }, - Ok(()) => Ok(ReturnCode::Success), + Ok(()) => Ok(ReturnCode::Success) } - } + }, - /// Calculates Ethereum address from the ECDSA compressed public key and stores - /// it into the supplied buffer. - /// - /// # Parameters - /// - /// - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes - /// value. Traps otherwise. - /// - `out_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. 
- /// If the available space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Errors - /// - /// `ReturnCode::EcdsaRecoverFailed` - #[prefixed_alias] - fn ecdsa_to_eth_address( - ctx: Runtime, - key_ptr: u32, - out_ptr: u32, - ) -> Result { + // Calculates Ethereum address from the ECDSA compressed public key and stores + // it into the supplied buffer. + // + // # Parameters + // + // - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes value. + // Traps otherwise. + // - `out_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // If the available space at `out_ptr` is less than the size of the value a trap is triggered. + // + // # Errors + // + // `ReturnCode::EcdsaRecoverFailed` + [seal0] seal_ecdsa_to_eth_address(ctx, key_ptr: u32, out_ptr: u32) -> ReturnCode => { ctx.charge_gas(RuntimeCosts::EcdsaToEthAddress)?; - let mut compressed_key: [u8; 33] = [0; 33]; + let mut compressed_key: [u8; 33] = [0;33]; ctx.read_sandbox_memory_into_buf(key_ptr, &mut compressed_key)?; let result = ctx.ext.ecdsa_to_eth_address(&compressed_key); match result { @@ -2443,5 +2229,5 @@ pub mod env { }, Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), } - } -} + }, +); diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 8632124c0200d..3c90579e65d53 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-06-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet // --chain=dev @@ -35,7 +35,6 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/contracts/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -167,75 +166,67 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - // Minimum execution time: 3_064 nanoseconds. - Weight::from_ref_time(3_236_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (1_654_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - // Minimum execution time: 15_492 nanoseconds. 
- Weight::from_ref_time(14_309_233 as u64) - // Standard Error: 649 - .saturating_add(Weight::from_ref_time(930_078 as u64).saturating_mul(k as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) + (8_564_000 as Weight) + // Standard Error: 0 + .saturating_add((868_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) - /// The range of component `q` is `[0, 128]`. + /// The range of component `q` is `[0, 1024]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - // Minimum execution time: 3_240 nanoseconds. - Weight::from_ref_time(15_076_559 as u64) - // Standard Error: 3_337 - .saturating_add(Weight::from_ref_time(1_244_348 as u64).saturating_mul(q as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (0 as Weight) + // Standard Error: 5_000 + .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - // Minimum execution time: 22_524 nanoseconds. 
- Weight::from_ref_time(19_939_078 as u64) - // Standard Error: 43 - .saturating_add(Weight::from_ref_time(43_802 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (19_016_000 as Weight) + // Standard Error: 0 + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) - // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - // Minimum execution time: 261_039 nanoseconds. - Weight::from_ref_time(228_709_853 as u64) - // Standard Error: 105 - .saturating_add(Weight::from_ref_time(47_449 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (205_194_000 as Weight) + // Standard Error: 0 + .saturating_add((53_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) - // Storage: System EventTopics (r:3 w:3) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - // Minimum execution time: 2_054_867 nanoseconds. 
- Weight::from_ref_time(259_090_306 as u64) - // Standard Error: 72 - .saturating_add(Weight::from_ref_time(107_519 as u64).saturating_mul(c as u64)) - // Standard Error: 4 - .saturating_add(Weight::from_ref_time(1_736 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().writes(9 as u64)) + (288_487_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -243,606 +234,526 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) - // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - // Minimum execution time: 213_409 nanoseconds. - Weight::from_ref_time(205_300_495 as u64) - // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_479 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().writes(7 as u64)) + (186_136_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) - // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - // Minimum execution time: 183_317 nanoseconds. 
- Weight::from_ref_time(184_465_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (149_232_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) - // Storage: System EventTopics (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - // Minimum execution time: 56_187 nanoseconds. - Weight::from_ref_time(60_636_621 as u64) - // Standard Error: 46 - .saturating_add(Weight::from_ref_time(45_734 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (51_721_000 as Weight) + // Standard Error: 0 + .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) - // Storage: System EventTopics (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - // Minimum execution time: 38_433 nanoseconds. - Weight::from_ref_time(38_917_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (30_016_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) - // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - // Minimum execution time: 41_507 nanoseconds. 
- Weight::from_ref_time(41_938_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (27_192_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - // Minimum execution time: 249_628 nanoseconds. - Weight::from_ref_time(251_997_923 as u64) - // Standard Error: 26_157 - .saturating_add(Weight::from_ref_time(35_002_004 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (206_405_000 as Weight) + // Standard Error: 112_000 + .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - // Minimum execution time: 249_390 nanoseconds. 
- Weight::from_ref_time(193_793_052 as u64) - // Standard Error: 430_292 - .saturating_add(Weight::from_ref_time(211_029_686 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (106_220_000 as Weight) + // Standard Error: 710_000 + .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 252_469 nanoseconds. - Weight::from_ref_time(201_438_856 as u64) - // Standard Error: 420_040 - .saturating_add(Weight::from_ref_time(267_340_744 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (104_498_000 as Weight) + // Standard Error: 633_000 + .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_own_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 251_154 nanoseconds. - Weight::from_ref_time(254_831_062 as u64) - // Standard Error: 37_843 - .saturating_add(Weight::from_ref_time(38_579_567 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (208_696_000 as Weight) + // Standard Error: 101_000 + .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - // Minimum execution time: 247_875 nanoseconds. - Weight::from_ref_time(250_312_587 as u64) - // Standard Error: 17_901 - .saturating_add(Weight::from_ref_time(15_153_431 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (205_612_000 as Weight) + // Standard Error: 68_000 + .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - // Minimum execution time: 250_097 nanoseconds. 
- Weight::from_ref_time(252_157_442 as u64) - // Standard Error: 38_426 - .saturating_add(Weight::from_ref_time(35_084_205 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (206_947_000 as Weight) + // Standard Error: 107_000 + .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - // Minimum execution time: 250_034 nanoseconds. - Weight::from_ref_time(252_189_233 as u64) - // Standard Error: 33_081 - .saturating_add(Weight::from_ref_time(34_764_160 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (208_692_000 as Weight) + // Standard Error: 109_000 + .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - // Minimum execution time: 249_587 nanoseconds. 
- Weight::from_ref_time(258_565_111 as u64) - // Standard Error: 75_715 - .saturating_add(Weight::from_ref_time(109_687_486 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (209_811_000 as Weight) + // Standard Error: 208_000 + .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - // Minimum execution time: 249_735 nanoseconds. - Weight::from_ref_time(252_875_784 as u64) - // Standard Error: 42_024 - .saturating_add(Weight::from_ref_time(34_555_983 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (207_406_000 as Weight) + // Standard Error: 117_000 + .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - // Minimum execution time: 250_025 nanoseconds. 
- Weight::from_ref_time(255_212_046 as u64) - // Standard Error: 41_865 - .saturating_add(Weight::from_ref_time(34_332_291 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (209_260_000 as Weight) + // Standard Error: 130_000 + .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - // Minimum execution time: 247_641 nanoseconds. - Weight::from_ref_time(252_978_686 as u64) - // Standard Error: 25_820 - .saturating_add(Weight::from_ref_time(34_175_386 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (206_448_000 as Weight) + // Standard Error: 95_000 + .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - // Minimum execution time: 249_871 nanoseconds. 
- Weight::from_ref_time(253_237_931 as u64) - // Standard Error: 30_986 - .saturating_add(Weight::from_ref_time(34_305_155 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (206_969_000 as Weight) + // Standard Error: 116_000 + .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - // Minimum execution time: 249_787 nanoseconds. - Weight::from_ref_time(258_457_094 as u64) - // Standard Error: 75_835 - .saturating_add(Weight::from_ref_time(107_115_666 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (211_611_000 as Weight) + // Standard Error: 175_000 + .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - // Minimum execution time: 171_667 nanoseconds. 
- Weight::from_ref_time(174_687_863 as u64) - // Standard Error: 34_576 - .saturating_add(Weight::from_ref_time(15_895_674 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (134_484_000 as Weight) + // Standard Error: 57_000 + .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - // Minimum execution time: 249_610 nanoseconds. - Weight::from_ref_time(251_476_758 as u64) - // Standard Error: 39_422 - .saturating_add(Weight::from_ref_time(32_870_429 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (208_556_000 as Weight) + // Standard Error: 125_000 + .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 285_154 nanoseconds. 
- Weight::from_ref_time(307_768_636 as u64) - // Standard Error: 2_701 - .saturating_add(Weight::from_ref_time(9_544_122 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (268_886_000 as Weight) + // Standard Error: 4_000 + .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { - // Minimum execution time: 244_810 nanoseconds. - Weight::from_ref_time(247_576_385 as u64) - // Standard Error: 80_494 - .saturating_add(Weight::from_ref_time(2_052_714 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn seal_return(_r: u32, ) -> Weight { + (203_591_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 248_049 nanoseconds. 
- Weight::from_ref_time(250_148_025 as u64) - // Standard Error: 339 - .saturating_add(Weight::from_ref_time(185_344 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (204_258_000 as Weight) + // Standard Error: 0 + .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - // Minimum execution time: 246_620 nanoseconds. - Weight::from_ref_time(250_752_277 as u64) - // Standard Error: 84_300 - .saturating_add(Weight::from_ref_time(54_264_722 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((6 as u64).saturating_mul(r as u64))) + (206_625_000 as Weight) + // Standard Error: 672_000 + .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: 
RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - // Minimum execution time: 249_065 nanoseconds. - Weight::from_ref_time(252_419_902 as u64) - // Standard Error: 84_223 - .saturating_add(Weight::from_ref_time(134_454_079 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (208_866_000 as Weight) + // Standard Error: 164_000 + .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - // Minimum execution time: 246_588 nanoseconds. - Weight::from_ref_time(261_525_328 as u64) - // Standard Error: 97_732 - .saturating_add(Weight::from_ref_time(235_555_878 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (220_860_000 as Weight) + // Standard Error: 209_000 + .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) + // Storage: System EventTopics (r:80 w:80) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - // Minimum execution time: 1_171_144 nanoseconds. - Weight::from_ref_time(490_333_337 as u64) - // Standard Error: 404_664 - .saturating_add(Weight::from_ref_time(173_683_265 as u64).saturating_mul(t as u64)) - // Standard Error: 111_140 - .saturating_add(Weight::from_ref_time(66_081_822 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(t as u64))) + (439_782_000 as Weight) + // Standard Error: 1_643_000 + .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 323_000 + .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(t as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - // Minimum execution time: 178_822 nanoseconds. 
- Weight::from_ref_time(181_571_518 as u64) - // Standard Error: 19_207 - .saturating_add(Weight::from_ref_time(26_784_712 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (140_280_000 as Weight) + // Standard Error: 82_000 + .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - // Minimum execution time: 249_737 nanoseconds. - Weight::from_ref_time(208_095_467 as u64) - // Standard Error: 417_236 - .saturating_add(Weight::from_ref_time(430_088_574 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (161_247_000 as Weight) + // Standard Error: 883_000 + .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - // Minimum execution time: 400_055 nanoseconds. 
- Weight::from_ref_time(551_666_883 as u64) - // Standard Error: 1_379_652 - .saturating_add(Weight::from_ref_time(94_069_118 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(52 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(50 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (529_247_000 as Weight) + // Standard Error: 2_745_000 + .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(53 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - // Minimum execution time: 400_370 nanoseconds. - Weight::from_ref_time(521_380_000 as u64) - // Standard Error: 1_112_618 - .saturating_add(Weight::from_ref_time(68_664_898 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(49 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (529_812_000 as Weight) + // Standard Error: 2_513_000 + .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(53 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_clear_storage(r: u32, ) -> Weight { - // Minimum execution time: 249_711 nanoseconds. - Weight::from_ref_time(212_629_798 as u64) - // Standard Error: 378_159 - .saturating_add(Weight::from_ref_time(415_326_230 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (184_803_000 as Weight) + // Standard Error: 733_000 + .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 365_702 nanoseconds. 
- Weight::from_ref_time(499_337_686 as u64) - // Standard Error: 1_232_330 - .saturating_add(Weight::from_ref_time(70_648_878 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(48 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (500_958_000 as Weight) + // Standard Error: 2_980_000 + .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(52 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - // Minimum execution time: 251_357 nanoseconds. - Weight::from_ref_time(220_533_580 as u64) - // Standard Error: 345_297 - .saturating_add(Weight::from_ref_time(349_413_968 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (177_682_000 as Weight) + // Standard Error: 743_000 + .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 354_162 nanoseconds. 
- Weight::from_ref_time(472_811_575 as u64) - // Standard Error: 1_109_282 - .saturating_add(Weight::from_ref_time(154_074_386 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (465_285_000 as Weight) + // Standard Error: 2_599_000 + .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - // Minimum execution time: 247_551 nanoseconds. - Weight::from_ref_time(219_176_526 as u64) - // Standard Error: 358_914 - .saturating_add(Weight::from_ref_time(326_009_513 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (179_118_000 as Weight) + // Standard Error: 572_000 + .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 339_149 nanoseconds. 
- Weight::from_ref_time(440_615_016 as u64) - // Standard Error: 954_837 - .saturating_add(Weight::from_ref_time(66_153_533 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (423_056_000 as Weight) + // Standard Error: 2_037_000 + .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(54 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - // Minimum execution time: 251_812 nanoseconds. - Weight::from_ref_time(209_954_069 as u64) - // Standard Error: 398_380 - .saturating_add(Weight::from_ref_time(438_573_954 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (188_884_000 as Weight) + // Standard Error: 761_000 + .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 374_594 nanoseconds. 
- Weight::from_ref_time(525_213_792 as u64) - // Standard Error: 1_378_489 - .saturating_add(Weight::from_ref_time(161_599_623 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(48 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (532_408_000 as Weight) + // Standard Error: 3_348_000 + .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(55 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(53 as Weight)) + .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - // Minimum execution time: 251_379 nanoseconds. 
- Weight::from_ref_time(204_214_298 as u64) - // Standard Error: 662_575 - .saturating_add(Weight::from_ref_time(1_366_716_853 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (127_181_000 as Weight) + // Standard Error: 1_495_000 + .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - // Minimum execution time: 252_896 nanoseconds. 
- Weight::from_ref_time(253_811_000 as u64) - // Standard Error: 6_576_179 - .saturating_add(Weight::from_ref_time(17_254_952_849 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((160 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 3_803_000 + .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - // Minimum execution time: 249_312 nanoseconds. 
- Weight::from_ref_time(253_806_000 as u64) - // Standard Error: 6_118_873 - .saturating_add(Weight::from_ref_time(17_081_370_212 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((150 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((75 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 6_045_000 + .saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - // Minimum execution time: 12_001_522 nanoseconds. 
- Weight::from_ref_time(10_903_312_955 as u64) - // Standard Error: 4_301_096 - .saturating_add(Weight::from_ref_time(1_243_413_241 as u64).saturating_mul(t as u64)) - // Standard Error: 6_449 - .saturating_add(Weight::from_ref_time(9_713_655 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(167 as u64)) - .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) - .saturating_add(T::DbWeight::get().writes(163 as u64)) - .saturating_add(T::DbWeight::get().writes((81 as u64).saturating_mul(t as u64))) + (9_196_444_000 as Weight) + // Standard Error: 20_486_000 + .saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 8_000 + .saturating_add((9_718_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(85 as Weight)) + .saturating_add(T::DbWeight::get().reads((81 as Weight).saturating_mul(t as Weight))) + .saturating_add(T::DbWeight::get().writes(81 as Weight)) + .saturating_add(T::DbWeight::get().writes((81 as Weight).saturating_mul(t as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - // Minimum execution time: 254_969 nanoseconds. 
- Weight::from_ref_time(255_984_000 as u64) - // Standard Error: 18_545_048 - .saturating_add(Weight::from_ref_time(22_343_189_765 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(5 as u64)) - .saturating_add(T::DbWeight::get().writes((400 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 36_253_000 + .saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().reads((320 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((320 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -850,532 +761,457 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) - // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - // Minimum execution time: 14_077_497 nanoseconds. 
- Weight::from_ref_time(13_949_740_588 as u64) - // Standard Error: 66_631 - .saturating_add(Weight::from_ref_time(120_519_572 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(249 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) - .saturating_add(T::DbWeight::get().writes(247 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(t as u64))) + (12_282_498_000 as Weight) + // Standard Error: 48_112_000 + .saturating_add((720_795_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 22_000 + .saturating_add((124_274_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(167 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(t as Weight))) + .saturating_add(T::DbWeight::get().writes(165 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(t as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - // Minimum execution time: 247_445 nanoseconds. 
- Weight::from_ref_time(251_229_791 as u64) - // Standard Error: 88_045 - .saturating_add(Weight::from_ref_time(57_577_008 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (203_959_000 as Weight) + // Standard Error: 142_000 + .saturating_add((61_311_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 308_069 nanoseconds. - Weight::from_ref_time(308_971_000 as u64) - // Standard Error: 46_181 - .saturating_add(Weight::from_ref_time(321_835_684 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (349_915_000 as Weight) + // Standard Error: 40_000 + .saturating_add((320_652_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - // Minimum execution time: 247_107 nanoseconds. 
- Weight::from_ref_time(250_125_030 as u64) - // Standard Error: 88_769 - .saturating_add(Weight::from_ref_time(70_727_669 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (209_219_000 as Weight) + // Standard Error: 157_000 + .saturating_add((73_728_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 319_515 nanoseconds. - Weight::from_ref_time(319_784_000 as u64) - // Standard Error: 58_896 - .saturating_add(Weight::from_ref_time(246_433_962 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (208_860_000 as Weight) + // Standard Error: 25_000 + .saturating_add((245_718_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Minimum execution time: 247_887 nanoseconds. 
- Weight::from_ref_time(250_452_702 as u64) - // Standard Error: 140_887 - .saturating_add(Weight::from_ref_time(49_538_397 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (206_165_000 as Weight) + // Standard Error: 138_000 + .saturating_add((51_644_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 297_534 nanoseconds. - Weight::from_ref_time(298_249_000 as u64) - // Standard Error: 49_680 - .saturating_add(Weight::from_ref_time(99_001_103 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (255_955_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_090_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Minimum execution time: 245_926 nanoseconds. 
- Weight::from_ref_time(248_471_834 as u64) - // Standard Error: 101_639 - .saturating_add(Weight::from_ref_time(47_889_865 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (208_153_000 as Weight) + // Standard Error: 140_000 + .saturating_add((51_264_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 294_835 nanoseconds. - Weight::from_ref_time(296_328_000 as u64) - // Standard Error: 46_612 - .saturating_add(Weight::from_ref_time(98_859_152 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (278_368_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_006_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - // Minimum execution time: 251_104 nanoseconds. 
- Weight::from_ref_time(253_114_893 as u64) - // Standard Error: 316_740 - .saturating_add(Weight::from_ref_time(2_964_072_706 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (331_955_000 as Weight) + // Standard Error: 1_155_000 + .saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - // Minimum execution time: 250_048 nanoseconds. - Weight::from_ref_time(251_774_991 as u64) - // Standard Error: 115_294 - .saturating_add(Weight::from_ref_time(2_094_245_208 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (207_838_000 as Weight) + // Standard Error: 783_000 + .saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 250_830 nanoseconds. 
- Weight::from_ref_time(251_477_000 as u64) - // Standard Error: 2_727_998 - .saturating_add(Weight::from_ref_time(1_390_149_283 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((225 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((150 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 1_567_000 + .saturating_add((774_380_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes((79 as Weight).saturating_mul(r as Weight))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - // Minimum execution time: 69_022 nanoseconds. - Weight::from_ref_time(69_707_657 as u64) - // Standard Error: 8_674 - .saturating_add(Weight::from_ref_time(887_555 as u64).saturating_mul(r as u64)) + (73_955_000 as Weight) + // Standard Error: 1_000 + .saturating_add((612_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - // Minimum execution time: 69_491 nanoseconds. - Weight::from_ref_time(70_354_670 as u64) - // Standard Error: 1_518 - .saturating_add(Weight::from_ref_time(2_758_912 as u64).saturating_mul(r as u64)) + (74_057_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - // Minimum execution time: 69_156 nanoseconds. 
- Weight::from_ref_time(69_917_601 as u64) - // Standard Error: 1_970 - .saturating_add(Weight::from_ref_time(2_753_174 as u64).saturating_mul(r as u64)) + (74_137_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_427_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - // Minimum execution time: 68_944 nanoseconds. - Weight::from_ref_time(69_727_961 as u64) - // Standard Error: 376 - .saturating_add(Weight::from_ref_time(2_356_996 as u64).saturating_mul(r as u64)) + (73_844_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_773_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - // Minimum execution time: 68_971 nanoseconds. - Weight::from_ref_time(69_755_949 as u64) - // Standard Error: 543 - .saturating_add(Weight::from_ref_time(2_489_510 as u64).saturating_mul(r as u64)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_952_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - // Minimum execution time: 69_061 nanoseconds. - Weight::from_ref_time(69_625_000 as u64) - // Standard Error: 486 - .saturating_add(Weight::from_ref_time(1_431_684 as u64).saturating_mul(r as u64)) + (73_924_000 as Weight) + // Standard Error: 3_000 + .saturating_add((941_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - // Minimum execution time: 69_058 nanoseconds. - Weight::from_ref_time(69_521_790 as u64) - // Standard Error: 892 - .saturating_add(Weight::from_ref_time(1_964_054 as u64).saturating_mul(r as u64)) + (73_574_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_439_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_table(r: u32, ) -> Weight { - // Minimum execution time: 69_020 nanoseconds. - Weight::from_ref_time(69_344_255 as u64) - // Standard Error: 1_408 - .saturating_add(Weight::from_ref_time(2_169_179 as u64).saturating_mul(r as u64)) + (73_343_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_603_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - // Minimum execution time: 72_366 nanoseconds. - Weight::from_ref_time(72_869_594 as u64) - // Standard Error: 73 - .saturating_add(Weight::from_ref_time(3_867 as u64).saturating_mul(e as u64)) + (76_267_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(e as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - // Minimum execution time: 69_164 nanoseconds. - Weight::from_ref_time(70_269_099 as u64) - // Standard Error: 8_824 - .saturating_add(Weight::from_ref_time(6_594_634 as u64).saturating_mul(r as u64)) + (74_877_000 as Weight) + // Standard Error: 12_000 + .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - // Minimum execution time: 83_348 nanoseconds. - Weight::from_ref_time(84_968_895 as u64) - // Standard Error: 6_305 - .saturating_add(Weight::from_ref_time(8_395_193 as u64).saturating_mul(r as u64)) + (88_665_000 as Weight) + // Standard Error: 20_000 + .saturating_add((9_142_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - // Minimum execution time: 92_358 nanoseconds. 
- Weight::from_ref_time(93_605_536 as u64) - // Standard Error: 2_019 - .saturating_add(Weight::from_ref_time(536_495 as u64).saturating_mul(p as u64)) + (98_600_000 as Weight) + // Standard Error: 2_000 + .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - // Minimum execution time: 69_191 nanoseconds. - Weight::from_ref_time(70_407_702 as u64) - // Standard Error: 2_812 - .saturating_add(Weight::from_ref_time(901_706 as u64).saturating_mul(r as u64)) + (74_555_000 as Weight) + // Standard Error: 1_000 + .saturating_add((624_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - // Minimum execution time: 69_230 nanoseconds. - Weight::from_ref_time(70_255_278 as u64) - // Standard Error: 1_284 - .saturating_add(Weight::from_ref_time(951_754 as u64).saturating_mul(r as u64)) + (74_329_000 as Weight) + // Standard Error: 1_000 + .saturating_add((688_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - // Minimum execution time: 69_278 nanoseconds. - Weight::from_ref_time(70_089_139 as u64) - // Standard Error: 757 - .saturating_add(Weight::from_ref_time(1_369_185 as u64).saturating_mul(r as u64)) + (74_612_000 as Weight) + // Standard Error: 1_000 + .saturating_add((909_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - // Minimum execution time: 72_047 nanoseconds. - Weight::from_ref_time(72_783_972 as u64) - // Standard Error: 837 - .saturating_add(Weight::from_ref_time(1_471_680 as u64).saturating_mul(r as u64)) + (76_906_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_192_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_set(r: u32, ) -> Weight { - // Minimum execution time: 71_960 nanoseconds. - Weight::from_ref_time(72_745_981 as u64) - // Standard Error: 1_086 - .saturating_add(Weight::from_ref_time(1_537_741 as u64).saturating_mul(r as u64)) + (76_979_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - // Minimum execution time: 69_221 nanoseconds. - Weight::from_ref_time(70_010_862 as u64) - // Standard Error: 1_845 - .saturating_add(Weight::from_ref_time(933_738 as u64).saturating_mul(r as u64)) + (74_370_000 as Weight) + // Standard Error: 3_000 + .saturating_add((661_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - // Minimum execution time: 69_081 nanoseconds. - Weight::from_ref_time(71_015_495 as u64) - // Standard Error: 27_078 - .saturating_add(Weight::from_ref_time(183_899_704 as u64).saturating_mul(r as u64)) + (73_584_000 as Weight) + // Standard Error: 353_000 + .saturating_add((187_114_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - // Minimum execution time: 70_589 nanoseconds. - Weight::from_ref_time(70_175_537 as u64) - // Standard Error: 1_355 - .saturating_add(Weight::from_ref_time(1_323_745 as u64).saturating_mul(r as u64)) + (74_206_000 as Weight) + // Standard Error: 1_000 + .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - // Minimum execution time: 69_083 nanoseconds. 
- Weight::from_ref_time(69_832_339 as u64) - // Standard Error: 818 - .saturating_add(Weight::from_ref_time(1_334_198 as u64).saturating_mul(r as u64)) + (73_992_000 as Weight) + // Standard Error: 1_000 + .saturating_add((893_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - // Minimum execution time: 69_084 nanoseconds. - Weight::from_ref_time(69_802_701 as u64) - // Standard Error: 744 - .saturating_add(Weight::from_ref_time(1_334_601 as u64).saturating_mul(r as u64)) + (73_985_000 as Weight) + // Standard Error: 2_000 + .saturating_add((891_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - // Minimum execution time: 69_052 nanoseconds. - Weight::from_ref_time(69_717_748 as u64) - // Standard Error: 571 - .saturating_add(Weight::from_ref_time(1_346_564 as u64).saturating_mul(r as u64)) + (74_117_000 as Weight) + // Standard Error: 4_000 + .saturating_add((901_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - // Minimum execution time: 69_016 nanoseconds. - Weight::from_ref_time(69_793_413 as u64) - // Standard Error: 769 - .saturating_add(Weight::from_ref_time(1_317_502 as u64).saturating_mul(r as u64)) + (73_981_000 as Weight) + // Standard Error: 1_000 + .saturating_add((866_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - // Minimum execution time: 69_043 nanoseconds. - Weight::from_ref_time(69_963_419 as u64) - // Standard Error: 1_117 - .saturating_add(Weight::from_ref_time(1_313_727 as u64).saturating_mul(r as u64)) + (74_104_000 as Weight) + // Standard Error: 3_000 + .saturating_add((868_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - // Minimum execution time: 69_032 nanoseconds. - Weight::from_ref_time(69_727_577 as u64) - // Standard Error: 662 - .saturating_add(Weight::from_ref_time(1_331_088 as u64).saturating_mul(r as u64)) + (74_293_000 as Weight) + // Standard Error: 3_000 + .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - // Minimum execution time: 69_097 nanoseconds. - Weight::from_ref_time(69_767_650 as u64) - // Standard Error: 2_056 - .saturating_add(Weight::from_ref_time(1_875_021 as u64).saturating_mul(r as u64)) + (74_055_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - // Minimum execution time: 69_153 nanoseconds. - Weight::from_ref_time(69_906_946 as u64) - // Standard Error: 1_060 - .saturating_add(Weight::from_ref_time(1_867_154 as u64).saturating_mul(r as u64)) + (73_710_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - // Minimum execution time: 70_380 nanoseconds. - Weight::from_ref_time(69_867_328 as u64) - // Standard Error: 778 - .saturating_add(Weight::from_ref_time(1_869_718 as u64).saturating_mul(r as u64)) + (73_917_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - // Minimum execution time: 69_259 nanoseconds. 
- Weight::from_ref_time(69_695_407 as u64) - // Standard Error: 746 - .saturating_add(Weight::from_ref_time(1_874_772 as u64).saturating_mul(r as u64)) + (74_048_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - // Minimum execution time: 68_986 nanoseconds. - Weight::from_ref_time(70_027_081 as u64) - // Standard Error: 1_401 - .saturating_add(Weight::from_ref_time(1_862_971 as u64).saturating_mul(r as u64)) + (74_029_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_349_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - // Minimum execution time: 68_953 nanoseconds. - Weight::from_ref_time(69_798_073 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_871_888 as u64).saturating_mul(r as u64)) + (74_267_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - // Minimum execution time: 68_909 nanoseconds. - Weight::from_ref_time(69_845_981 as u64) - // Standard Error: 775 - .saturating_add(Weight::from_ref_time(1_868_722 as u64).saturating_mul(r as u64)) + (73_952_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - // Minimum execution time: 68_986 nanoseconds. - Weight::from_ref_time(69_683_189 as u64) - // Standard Error: 503 - .saturating_add(Weight::from_ref_time(1_884_715 as u64).saturating_mul(r as u64)) + (73_851_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_368_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ges(r: u32, ) -> Weight { - // Minimum execution time: 69_230 nanoseconds. - Weight::from_ref_time(69_765_336 as u64) - // Standard Error: 2_060 - .saturating_add(Weight::from_ref_time(1_871_848 as u64).saturating_mul(r as u64)) + (74_034_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - // Minimum execution time: 68_953 nanoseconds. - Weight::from_ref_time(69_828_265 as u64) - // Standard Error: 951 - .saturating_add(Weight::from_ref_time(1_868_596 as u64).saturating_mul(r as u64)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - // Minimum execution time: 69_078 nanoseconds. - Weight::from_ref_time(69_832_768 as u64) - // Standard Error: 894 - .saturating_add(Weight::from_ref_time(1_845_786 as u64).saturating_mul(r as u64)) + (74_000_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_328_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - // Minimum execution time: 68_939 nanoseconds. - Weight::from_ref_time(69_676_256 as u64) - // Standard Error: 374 - .saturating_add(Weight::from_ref_time(1_851_026 as u64).saturating_mul(r as u64)) + (73_883_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_331_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - // Minimum execution time: 69_096 nanoseconds. 
- Weight::from_ref_time(69_914_159 as u64) - // Standard Error: 1_265 - .saturating_add(Weight::from_ref_time(1_844_489 as u64).saturating_mul(r as u64)) + (74_216_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - // Minimum execution time: 68_939 nanoseconds. - Weight::from_ref_time(69_641_768 as u64) - // Standard Error: 347 - .saturating_add(Weight::from_ref_time(2_488_628 as u64).saturating_mul(r as u64)) + (73_989_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_998_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - // Minimum execution time: 69_114 nanoseconds. - Weight::from_ref_time(69_844_395 as u64) - // Standard Error: 1_489 - .saturating_add(Weight::from_ref_time(2_456_310 as u64).saturating_mul(r as u64)) + (73_857_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - // Minimum execution time: 69_082 nanoseconds. - Weight::from_ref_time(69_993_662 as u64) - // Standard Error: 1_218 - .saturating_add(Weight::from_ref_time(2_524_010 as u64).saturating_mul(r as u64)) + (73_801_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_027_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - // Minimum execution time: 69_036 nanoseconds. - Weight::from_ref_time(70_095_304 as u64) - // Standard Error: 1_473 - .saturating_add(Weight::from_ref_time(2_429_659 as u64).saturating_mul(r as u64)) + (74_130_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_064_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64and(r: u32, ) -> Weight { - // Minimum execution time: 69_229 nanoseconds. - Weight::from_ref_time(69_759_818 as u64) - // Standard Error: 573 - .saturating_add(Weight::from_ref_time(1_879_670 as u64).saturating_mul(r as u64)) + (74_071_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - // Minimum execution time: 69_151 nanoseconds. - Weight::from_ref_time(69_865_948 as u64) - // Standard Error: 721 - .saturating_add(Weight::from_ref_time(1_846_734 as u64).saturating_mul(r as u64)) + (74_201_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_330_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - // Minimum execution time: 69_120 nanoseconds. - Weight::from_ref_time(70_135_849 as u64) - // Standard Error: 3_443 - .saturating_add(Weight::from_ref_time(1_841_784 as u64).saturating_mul(r as u64)) + (74_241_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_321_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - // Minimum execution time: 69_077 nanoseconds. - Weight::from_ref_time(69_929_746 as u64) - // Standard Error: 821 - .saturating_add(Weight::from_ref_time(1_866_348 as u64).saturating_mul(r as u64)) + (74_331_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_347_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - // Minimum execution time: 69_226 nanoseconds. 
- Weight::from_ref_time(69_725_630 as u64) - // Standard Error: 891 - .saturating_add(Weight::from_ref_time(1_873_637 as u64).saturating_mul(r as u64)) + (73_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - // Minimum execution time: 70_591 nanoseconds. - Weight::from_ref_time(69_939_773 as u64) - // Standard Error: 960 - .saturating_add(Weight::from_ref_time(1_867_208 as u64).saturating_mul(r as u64)) + (73_807_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - // Minimum execution time: 69_187 nanoseconds. - Weight::from_ref_time(69_845_516 as u64) - // Standard Error: 781 - .saturating_add(Weight::from_ref_time(1_869_613 as u64).saturating_mul(r as u64)) + (73_725_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - // Minimum execution time: 69_065 nanoseconds. - Weight::from_ref_time(69_950_430 as u64) - // Standard Error: 986 - .saturating_add(Weight::from_ref_time(1_867_001 as u64).saturating_mul(r as u64)) + (73_755_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } } @@ -1383,75 +1219,67 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - // Minimum execution time: 3_064 nanoseconds. - Weight::from_ref_time(3_236_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (1_654_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. 
fn on_initialize_per_trie_key(k: u32, ) -> Weight { - // Minimum execution time: 15_492 nanoseconds. - Weight::from_ref_time(14_309_233 as u64) - // Standard Error: 649 - .saturating_add(Weight::from_ref_time(930_078 as u64).saturating_mul(k as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) + (8_564_000 as Weight) + // Standard Error: 0 + .saturating_add((868_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) - /// The range of component `q` is `[0, 128]`. + /// The range of component `q` is `[0, 1024]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - // Minimum execution time: 3_240 nanoseconds. - Weight::from_ref_time(15_076_559 as u64) - // Standard Error: 3_337 - .saturating_add(Weight::from_ref_time(1_244_348 as u64).saturating_mul(q as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (0 as Weight) + // Standard Error: 5_000 + .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - // Minimum execution time: 22_524 nanoseconds. 
- Weight::from_ref_time(19_939_078 as u64) - // Standard Error: 43 - .saturating_add(Weight::from_ref_time(43_802 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (19_016_000 as Weight) + // Standard Error: 0 + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) - // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - // Minimum execution time: 261_039 nanoseconds. - Weight::from_ref_time(228_709_853 as u64) - // Standard Error: 105 - .saturating_add(Weight::from_ref_time(47_449 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (205_194_000 as Weight) + // Standard Error: 0 + .saturating_add((53_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) - // Storage: System EventTopics (r:3 w:3) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - // Minimum execution time: 2_054_867 nanoseconds. 
- Weight::from_ref_time(259_090_306 as u64) - // Standard Error: 72 - .saturating_add(Weight::from_ref_time(107_519 as u64).saturating_mul(c as u64)) - // Standard Error: 4 - .saturating_add(Weight::from_ref_time(1_736 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().writes(9 as u64)) + (288_487_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -1459,606 +1287,526 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) - // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - // Minimum execution time: 213_409 nanoseconds. - Weight::from_ref_time(205_300_495 as u64) - // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_479 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().writes(7 as u64)) + (186_136_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) - // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - // Minimum execution time: 183_317 nanoseconds. 
- Weight::from_ref_time(184_465_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (149_232_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) - // Storage: System EventTopics (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - // Minimum execution time: 56_187 nanoseconds. - Weight::from_ref_time(60_636_621 as u64) - // Standard Error: 46 - .saturating_add(Weight::from_ref_time(45_734 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (51_721_000 as Weight) + // Standard Error: 0 + .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) - // Storage: System EventTopics (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - // Minimum execution time: 38_433 nanoseconds. - Weight::from_ref_time(38_917_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (30_016_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) - // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - // Minimum execution time: 41_507 nanoseconds. 
- Weight::from_ref_time(41_938_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (27_192_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - // Minimum execution time: 249_628 nanoseconds. - Weight::from_ref_time(251_997_923 as u64) - // Standard Error: 26_157 - .saturating_add(Weight::from_ref_time(35_002_004 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (206_405_000 as Weight) + // Standard Error: 112_000 + .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - // Minimum execution time: 249_390 nanoseconds. 
- Weight::from_ref_time(193_793_052 as u64) - // Standard Error: 430_292 - .saturating_add(Weight::from_ref_time(211_029_686 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (106_220_000 as Weight) + // Standard Error: 710_000 + .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 252_469 nanoseconds. - Weight::from_ref_time(201_438_856 as u64) - // Standard Error: 420_040 - .saturating_add(Weight::from_ref_time(267_340_744 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (104_498_000 as Weight) + // Standard Error: 633_000 + .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_own_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 251_154 nanoseconds. - Weight::from_ref_time(254_831_062 as u64) - // Standard Error: 37_843 - .saturating_add(Weight::from_ref_time(38_579_567 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (208_696_000 as Weight) + // Standard Error: 101_000 + .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - // Minimum execution time: 247_875 nanoseconds. - Weight::from_ref_time(250_312_587 as u64) - // Standard Error: 17_901 - .saturating_add(Weight::from_ref_time(15_153_431 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (205_612_000 as Weight) + // Standard Error: 68_000 + .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - // Minimum execution time: 250_097 nanoseconds. 
- Weight::from_ref_time(252_157_442 as u64) - // Standard Error: 38_426 - .saturating_add(Weight::from_ref_time(35_084_205 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (206_947_000 as Weight) + // Standard Error: 107_000 + .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - // Minimum execution time: 250_034 nanoseconds. - Weight::from_ref_time(252_189_233 as u64) - // Standard Error: 33_081 - .saturating_add(Weight::from_ref_time(34_764_160 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (208_692_000 as Weight) + // Standard Error: 109_000 + .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - // Minimum execution time: 249_587 nanoseconds. 
- Weight::from_ref_time(258_565_111 as u64) - // Standard Error: 75_715 - .saturating_add(Weight::from_ref_time(109_687_486 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (209_811_000 as Weight) + // Standard Error: 208_000 + .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - // Minimum execution time: 249_735 nanoseconds. - Weight::from_ref_time(252_875_784 as u64) - // Standard Error: 42_024 - .saturating_add(Weight::from_ref_time(34_555_983 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (207_406_000 as Weight) + // Standard Error: 117_000 + .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - // Minimum execution time: 250_025 nanoseconds. 
- Weight::from_ref_time(255_212_046 as u64) - // Standard Error: 41_865 - .saturating_add(Weight::from_ref_time(34_332_291 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (209_260_000 as Weight) + // Standard Error: 130_000 + .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - // Minimum execution time: 247_641 nanoseconds. - Weight::from_ref_time(252_978_686 as u64) - // Standard Error: 25_820 - .saturating_add(Weight::from_ref_time(34_175_386 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (206_448_000 as Weight) + // Standard Error: 95_000 + .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - // Minimum execution time: 249_871 nanoseconds. 
- Weight::from_ref_time(253_237_931 as u64) - // Standard Error: 30_986 - .saturating_add(Weight::from_ref_time(34_305_155 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (206_969_000 as Weight) + // Standard Error: 116_000 + .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - // Minimum execution time: 249_787 nanoseconds. - Weight::from_ref_time(258_457_094 as u64) - // Standard Error: 75_835 - .saturating_add(Weight::from_ref_time(107_115_666 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (211_611_000 as Weight) + // Standard Error: 175_000 + .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - // Minimum execution time: 171_667 nanoseconds. 
- Weight::from_ref_time(174_687_863 as u64) - // Standard Error: 34_576 - .saturating_add(Weight::from_ref_time(15_895_674 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (134_484_000 as Weight) + // Standard Error: 57_000 + .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - // Minimum execution time: 249_610 nanoseconds. - Weight::from_ref_time(251_476_758 as u64) - // Standard Error: 39_422 - .saturating_add(Weight::from_ref_time(32_870_429 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (208_556_000 as Weight) + // Standard Error: 125_000 + .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 285_154 nanoseconds. 
- Weight::from_ref_time(307_768_636 as u64) - // Standard Error: 2_701 - .saturating_add(Weight::from_ref_time(9_544_122 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (268_886_000 as Weight) + // Standard Error: 4_000 + .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { - // Minimum execution time: 244_810 nanoseconds. - Weight::from_ref_time(247_576_385 as u64) - // Standard Error: 80_494 - .saturating_add(Weight::from_ref_time(2_052_714 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn seal_return(_r: u32, ) -> Weight { + (203_591_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 248_049 nanoseconds. 
- Weight::from_ref_time(250_148_025 as u64) - // Standard Error: 339 - .saturating_add(Weight::from_ref_time(185_344 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (204_258_000 as Weight) + // Standard Error: 0 + .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - // Minimum execution time: 246_620 nanoseconds. - Weight::from_ref_time(250_752_277 as u64) - // Standard Error: 84_300 - .saturating_add(Weight::from_ref_time(54_264_722 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((6 as u64).saturating_mul(r as u64))) + (206_625_000 as Weight) + // Standard Error: 672_000 + .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 
w:2) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - // Minimum execution time: 249_065 nanoseconds. - Weight::from_ref_time(252_419_902 as u64) - // Standard Error: 84_223 - .saturating_add(Weight::from_ref_time(134_454_079 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (208_866_000 as Weight) + // Standard Error: 164_000 + .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - // Minimum execution time: 246_588 nanoseconds. - Weight::from_ref_time(261_525_328 as u64) - // Standard Error: 97_732 - .saturating_add(Weight::from_ref_time(235_555_878 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (220_860_000 as Weight) + // Standard Error: 209_000 + .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) + // Storage: System EventTopics (r:80 w:80) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - // Minimum execution time: 1_171_144 nanoseconds. - Weight::from_ref_time(490_333_337 as u64) - // Standard Error: 404_664 - .saturating_add(Weight::from_ref_time(173_683_265 as u64).saturating_mul(t as u64)) - // Standard Error: 111_140 - .saturating_add(Weight::from_ref_time(66_081_822 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(t as u64))) + (439_782_000 as Weight) + // Standard Error: 1_643_000 + .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 323_000 + .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(t as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - // Minimum execution time: 178_822 nanoseconds. 
- Weight::from_ref_time(181_571_518 as u64) - // Standard Error: 19_207 - .saturating_add(Weight::from_ref_time(26_784_712 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (140_280_000 as Weight) + // Standard Error: 82_000 + .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - // Minimum execution time: 249_737 nanoseconds. - Weight::from_ref_time(208_095_467 as u64) - // Standard Error: 417_236 - .saturating_add(Weight::from_ref_time(430_088_574 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (161_247_000 as Weight) + // Standard Error: 883_000 + .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - // Minimum execution time: 400_055 nanoseconds. 
- Weight::from_ref_time(551_666_883 as u64) - // Standard Error: 1_379_652 - .saturating_add(Weight::from_ref_time(94_069_118 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(52 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(50 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (529_247_000 as Weight) + // Standard Error: 2_745_000 + .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(53 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - // Minimum execution time: 400_370 nanoseconds. 
- Weight::from_ref_time(521_380_000 as u64) - // Standard Error: 1_112_618 - .saturating_add(Weight::from_ref_time(68_664_898 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(49 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (529_812_000 as Weight) + // Standard Error: 2_513_000 + .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(53 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - // Minimum execution time: 249_711 nanoseconds. 
- Weight::from_ref_time(212_629_798 as u64) - // Standard Error: 378_159 - .saturating_add(Weight::from_ref_time(415_326_230 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (184_803_000 as Weight) + // Standard Error: 733_000 + .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 365_702 nanoseconds. 
- Weight::from_ref_time(499_337_686 as u64) - // Standard Error: 1_232_330 - .saturating_add(Weight::from_ref_time(70_648_878 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(48 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (500_958_000 as Weight) + // Standard Error: 2_980_000 + .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(52 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - // Minimum execution time: 251_357 nanoseconds. - Weight::from_ref_time(220_533_580 as u64) - // Standard Error: 345_297 - .saturating_add(Weight::from_ref_time(349_413_968 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (177_682_000 as Weight) + // Standard Error: 743_000 + .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 354_162 nanoseconds. 
- Weight::from_ref_time(472_811_575 as u64) - // Standard Error: 1_109_282 - .saturating_add(Weight::from_ref_time(154_074_386 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (465_285_000 as Weight) + // Standard Error: 2_599_000 + .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - // Minimum execution time: 247_551 nanoseconds. - Weight::from_ref_time(219_176_526 as u64) - // Standard Error: 358_914 - .saturating_add(Weight::from_ref_time(326_009_513 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (179_118_000 as Weight) + // Standard Error: 572_000 + .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 339_149 nanoseconds. 
- Weight::from_ref_time(440_615_016 as u64) - // Standard Error: 954_837 - .saturating_add(Weight::from_ref_time(66_153_533 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (423_056_000 as Weight) + // Standard Error: 2_037_000 + .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(54 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - // Minimum execution time: 251_812 nanoseconds. - Weight::from_ref_time(209_954_069 as u64) - // Standard Error: 398_380 - .saturating_add(Weight::from_ref_time(438_573_954 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (188_884_000 as Weight) + // Standard Error: 761_000 + .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 374_594 nanoseconds. 
- Weight::from_ref_time(525_213_792 as u64) - // Standard Error: 1_378_489 - .saturating_add(Weight::from_ref_time(161_599_623 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(48 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + (532_408_000 as Weight) + // Standard Error: 3_348_000 + .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(55 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(53 as Weight)) + .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - // Minimum execution time: 251_379 nanoseconds. 
- Weight::from_ref_time(204_214_298 as u64) - // Standard Error: 662_575 - .saturating_add(Weight::from_ref_time(1_366_716_853 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + (127_181_000 as Weight) + // Standard Error: 1_495_000 + .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - // Minimum execution time: 252_896 nanoseconds. 
- Weight::from_ref_time(253_811_000 as u64) - // Standard Error: 6_576_179 - .saturating_add(Weight::from_ref_time(17_254_952_849 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((160 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 3_803_000 + .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - // Minimum execution time: 249_312 nanoseconds. 
- Weight::from_ref_time(253_806_000 as u64) - // Standard Error: 6_118_873 - .saturating_add(Weight::from_ref_time(17_081_370_212 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((150 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((75 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 6_045_000 + .saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - // Minimum execution time: 12_001_522 nanoseconds. 
- Weight::from_ref_time(10_903_312_955 as u64) - // Standard Error: 4_301_096 - .saturating_add(Weight::from_ref_time(1_243_413_241 as u64).saturating_mul(t as u64)) - // Standard Error: 6_449 - .saturating_add(Weight::from_ref_time(9_713_655 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(167 as u64)) - .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) - .saturating_add(RocksDbWeight::get().writes(163 as u64)) - .saturating_add(RocksDbWeight::get().writes((81 as u64).saturating_mul(t as u64))) + (9_196_444_000 as Weight) + // Standard Error: 20_486_000 + .saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 8_000 + .saturating_add((9_718_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(85 as Weight)) + .saturating_add(RocksDbWeight::get().reads((81 as Weight).saturating_mul(t as Weight))) + .saturating_add(RocksDbWeight::get().writes(81 as Weight)) + .saturating_add(RocksDbWeight::get().writes((81 as Weight).saturating_mul(t as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - // Minimum execution time: 254_969 nanoseconds. 
- Weight::from_ref_time(255_984_000 as u64) - // Standard Error: 18_545_048 - .saturating_add(Weight::from_ref_time(22_343_189_765 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) - .saturating_add(RocksDbWeight::get().writes((400 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 36_253_000 + .saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().reads((320 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((320 as Weight).saturating_mul(r as Weight))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -2066,531 +1814,456 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) - // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - // Minimum execution time: 14_077_497 nanoseconds. 
- Weight::from_ref_time(13_949_740_588 as u64) - // Standard Error: 66_631 - .saturating_add(Weight::from_ref_time(120_519_572 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(249 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) - .saturating_add(RocksDbWeight::get().writes(247 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(t as u64))) + (12_282_498_000 as Weight) + // Standard Error: 48_112_000 + .saturating_add((720_795_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 22_000 + .saturating_add((124_274_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(167 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(t as Weight))) + .saturating_add(RocksDbWeight::get().writes(165 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(t as Weight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - // Minimum execution time: 247_445 nanoseconds. 
- Weight::from_ref_time(251_229_791 as u64) - // Standard Error: 88_045 - .saturating_add(Weight::from_ref_time(57_577_008 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (203_959_000 as Weight) + // Standard Error: 142_000 + .saturating_add((61_311_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 308_069 nanoseconds. - Weight::from_ref_time(308_971_000 as u64) - // Standard Error: 46_181 - .saturating_add(Weight::from_ref_time(321_835_684 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (349_915_000 as Weight) + // Standard Error: 40_000 + .saturating_add((320_652_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - // Minimum execution time: 247_107 nanoseconds. 
- Weight::from_ref_time(250_125_030 as u64) - // Standard Error: 88_769 - .saturating_add(Weight::from_ref_time(70_727_669 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (209_219_000 as Weight) + // Standard Error: 157_000 + .saturating_add((73_728_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 319_515 nanoseconds. - Weight::from_ref_time(319_784_000 as u64) - // Standard Error: 58_896 - .saturating_add(Weight::from_ref_time(246_433_962 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (208_860_000 as Weight) + // Standard Error: 25_000 + .saturating_add((245_718_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Minimum execution time: 247_887 nanoseconds. 
- Weight::from_ref_time(250_452_702 as u64) - // Standard Error: 140_887 - .saturating_add(Weight::from_ref_time(49_538_397 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (206_165_000 as Weight) + // Standard Error: 138_000 + .saturating_add((51_644_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 297_534 nanoseconds. - Weight::from_ref_time(298_249_000 as u64) - // Standard Error: 49_680 - .saturating_add(Weight::from_ref_time(99_001_103 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (255_955_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_090_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Minimum execution time: 245_926 nanoseconds. 
- Weight::from_ref_time(248_471_834 as u64) - // Standard Error: 101_639 - .saturating_add(Weight::from_ref_time(47_889_865 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (208_153_000 as Weight) + // Standard Error: 140_000 + .saturating_add((51_264_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 294_835 nanoseconds. - Weight::from_ref_time(296_328_000 as u64) - // Standard Error: 46_612 - .saturating_add(Weight::from_ref_time(98_859_152 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (278_368_000 as Weight) + // Standard Error: 14_000 + .saturating_add((95_006_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - // Minimum execution time: 251_104 nanoseconds. 
- Weight::from_ref_time(253_114_893 as u64) - // Standard Error: 316_740 - .saturating_add(Weight::from_ref_time(2_964_072_706 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (331_955_000 as Weight) + // Standard Error: 1_155_000 + .saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 1]`. + /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - // Minimum execution time: 250_048 nanoseconds. - Weight::from_ref_time(251_774_991 as u64) - // Standard Error: 115_294 - .saturating_add(Weight::from_ref_time(2_094_245_208 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (207_838_000 as Weight) + // Standard Error: 783_000 + .saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:2 w:2) // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 250_830 nanoseconds. 
- Weight::from_ref_time(251_477_000 as u64) - // Standard Error: 2_727_998 - .saturating_add(Weight::from_ref_time(1_390_149_283 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((225 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((150 as u64).saturating_mul(r as u64))) + (0 as Weight) + // Standard Error: 1_567_000 + .saturating_add((774_380_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes((79 as Weight).saturating_mul(r as Weight))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - // Minimum execution time: 69_022 nanoseconds. - Weight::from_ref_time(69_707_657 as u64) - // Standard Error: 8_674 - .saturating_add(Weight::from_ref_time(887_555 as u64).saturating_mul(r as u64)) + (73_955_000 as Weight) + // Standard Error: 1_000 + .saturating_add((612_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - // Minimum execution time: 69_491 nanoseconds. - Weight::from_ref_time(70_354_670 as u64) - // Standard Error: 1_518 - .saturating_add(Weight::from_ref_time(2_758_912 as u64).saturating_mul(r as u64)) + (74_057_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - // Minimum execution time: 69_156 nanoseconds. 
- Weight::from_ref_time(69_917_601 as u64) - // Standard Error: 1_970 - .saturating_add(Weight::from_ref_time(2_753_174 as u64).saturating_mul(r as u64)) + (74_137_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_427_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - // Minimum execution time: 68_944 nanoseconds. - Weight::from_ref_time(69_727_961 as u64) - // Standard Error: 376 - .saturating_add(Weight::from_ref_time(2_356_996 as u64).saturating_mul(r as u64)) + (73_844_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_773_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - // Minimum execution time: 68_971 nanoseconds. - Weight::from_ref_time(69_755_949 as u64) - // Standard Error: 543 - .saturating_add(Weight::from_ref_time(2_489_510 as u64).saturating_mul(r as u64)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_952_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - // Minimum execution time: 69_061 nanoseconds. - Weight::from_ref_time(69_625_000 as u64) - // Standard Error: 486 - .saturating_add(Weight::from_ref_time(1_431_684 as u64).saturating_mul(r as u64)) + (73_924_000 as Weight) + // Standard Error: 3_000 + .saturating_add((941_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - // Minimum execution time: 69_058 nanoseconds. - Weight::from_ref_time(69_521_790 as u64) - // Standard Error: 892 - .saturating_add(Weight::from_ref_time(1_964_054 as u64).saturating_mul(r as u64)) + (73_574_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_439_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_table(r: u32, ) -> Weight { - // Minimum execution time: 69_020 nanoseconds. - Weight::from_ref_time(69_344_255 as u64) - // Standard Error: 1_408 - .saturating_add(Weight::from_ref_time(2_169_179 as u64).saturating_mul(r as u64)) + (73_343_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_603_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - // Minimum execution time: 72_366 nanoseconds. - Weight::from_ref_time(72_869_594 as u64) - // Standard Error: 73 - .saturating_add(Weight::from_ref_time(3_867 as u64).saturating_mul(e as u64)) + (76_267_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(e as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - // Minimum execution time: 69_164 nanoseconds. - Weight::from_ref_time(70_269_099 as u64) - // Standard Error: 8_824 - .saturating_add(Weight::from_ref_time(6_594_634 as u64).saturating_mul(r as u64)) + (74_877_000 as Weight) + // Standard Error: 12_000 + .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - // Minimum execution time: 83_348 nanoseconds. - Weight::from_ref_time(84_968_895 as u64) - // Standard Error: 6_305 - .saturating_add(Weight::from_ref_time(8_395_193 as u64).saturating_mul(r as u64)) + (88_665_000 as Weight) + // Standard Error: 20_000 + .saturating_add((9_142_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - // Minimum execution time: 92_358 nanoseconds. 
- Weight::from_ref_time(93_605_536 as u64) - // Standard Error: 2_019 - .saturating_add(Weight::from_ref_time(536_495 as u64).saturating_mul(p as u64)) + (98_600_000 as Weight) + // Standard Error: 2_000 + .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - // Minimum execution time: 69_191 nanoseconds. - Weight::from_ref_time(70_407_702 as u64) - // Standard Error: 2_812 - .saturating_add(Weight::from_ref_time(901_706 as u64).saturating_mul(r as u64)) + (74_555_000 as Weight) + // Standard Error: 1_000 + .saturating_add((624_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - // Minimum execution time: 69_230 nanoseconds. - Weight::from_ref_time(70_255_278 as u64) - // Standard Error: 1_284 - .saturating_add(Weight::from_ref_time(951_754 as u64).saturating_mul(r as u64)) + (74_329_000 as Weight) + // Standard Error: 1_000 + .saturating_add((688_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - // Minimum execution time: 69_278 nanoseconds. - Weight::from_ref_time(70_089_139 as u64) - // Standard Error: 757 - .saturating_add(Weight::from_ref_time(1_369_185 as u64).saturating_mul(r as u64)) + (74_612_000 as Weight) + // Standard Error: 1_000 + .saturating_add((909_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - // Minimum execution time: 72_047 nanoseconds. - Weight::from_ref_time(72_783_972 as u64) - // Standard Error: 837 - .saturating_add(Weight::from_ref_time(1_471_680 as u64).saturating_mul(r as u64)) + (76_906_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_192_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_set(r: u32, ) -> Weight { - // Minimum execution time: 71_960 nanoseconds. - Weight::from_ref_time(72_745_981 as u64) - // Standard Error: 1_086 - .saturating_add(Weight::from_ref_time(1_537_741 as u64).saturating_mul(r as u64)) + (76_979_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - // Minimum execution time: 69_221 nanoseconds. - Weight::from_ref_time(70_010_862 as u64) - // Standard Error: 1_845 - .saturating_add(Weight::from_ref_time(933_738 as u64).saturating_mul(r as u64)) + (74_370_000 as Weight) + // Standard Error: 3_000 + .saturating_add((661_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - // Minimum execution time: 69_081 nanoseconds. - Weight::from_ref_time(71_015_495 as u64) - // Standard Error: 27_078 - .saturating_add(Weight::from_ref_time(183_899_704 as u64).saturating_mul(r as u64)) + (73_584_000 as Weight) + // Standard Error: 353_000 + .saturating_add((187_114_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - // Minimum execution time: 70_589 nanoseconds. - Weight::from_ref_time(70_175_537 as u64) - // Standard Error: 1_355 - .saturating_add(Weight::from_ref_time(1_323_745 as u64).saturating_mul(r as u64)) + (74_206_000 as Weight) + // Standard Error: 1_000 + .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - // Minimum execution time: 69_083 nanoseconds. 
- Weight::from_ref_time(69_832_339 as u64) - // Standard Error: 818 - .saturating_add(Weight::from_ref_time(1_334_198 as u64).saturating_mul(r as u64)) + (73_992_000 as Weight) + // Standard Error: 1_000 + .saturating_add((893_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - // Minimum execution time: 69_084 nanoseconds. - Weight::from_ref_time(69_802_701 as u64) - // Standard Error: 744 - .saturating_add(Weight::from_ref_time(1_334_601 as u64).saturating_mul(r as u64)) + (73_985_000 as Weight) + // Standard Error: 2_000 + .saturating_add((891_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - // Minimum execution time: 69_052 nanoseconds. - Weight::from_ref_time(69_717_748 as u64) - // Standard Error: 571 - .saturating_add(Weight::from_ref_time(1_346_564 as u64).saturating_mul(r as u64)) + (74_117_000 as Weight) + // Standard Error: 4_000 + .saturating_add((901_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - // Minimum execution time: 69_016 nanoseconds. - Weight::from_ref_time(69_793_413 as u64) - // Standard Error: 769 - .saturating_add(Weight::from_ref_time(1_317_502 as u64).saturating_mul(r as u64)) + (73_981_000 as Weight) + // Standard Error: 1_000 + .saturating_add((866_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - // Minimum execution time: 69_043 nanoseconds. - Weight::from_ref_time(69_963_419 as u64) - // Standard Error: 1_117 - .saturating_add(Weight::from_ref_time(1_313_727 as u64).saturating_mul(r as u64)) + (74_104_000 as Weight) + // Standard Error: 3_000 + .saturating_add((868_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - // Minimum execution time: 69_032 nanoseconds. - Weight::from_ref_time(69_727_577 as u64) - // Standard Error: 662 - .saturating_add(Weight::from_ref_time(1_331_088 as u64).saturating_mul(r as u64)) + (74_293_000 as Weight) + // Standard Error: 3_000 + .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - // Minimum execution time: 69_097 nanoseconds. - Weight::from_ref_time(69_767_650 as u64) - // Standard Error: 2_056 - .saturating_add(Weight::from_ref_time(1_875_021 as u64).saturating_mul(r as u64)) + (74_055_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - // Minimum execution time: 69_153 nanoseconds. - Weight::from_ref_time(69_906_946 as u64) - // Standard Error: 1_060 - .saturating_add(Weight::from_ref_time(1_867_154 as u64).saturating_mul(r as u64)) + (73_710_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - // Minimum execution time: 70_380 nanoseconds. - Weight::from_ref_time(69_867_328 as u64) - // Standard Error: 778 - .saturating_add(Weight::from_ref_time(1_869_718 as u64).saturating_mul(r as u64)) + (73_917_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - // Minimum execution time: 69_259 nanoseconds. 
- Weight::from_ref_time(69_695_407 as u64) - // Standard Error: 746 - .saturating_add(Weight::from_ref_time(1_874_772 as u64).saturating_mul(r as u64)) + (74_048_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - // Minimum execution time: 68_986 nanoseconds. - Weight::from_ref_time(70_027_081 as u64) - // Standard Error: 1_401 - .saturating_add(Weight::from_ref_time(1_862_971 as u64).saturating_mul(r as u64)) + (74_029_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_349_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - // Minimum execution time: 68_953 nanoseconds. - Weight::from_ref_time(69_798_073 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_871_888 as u64).saturating_mul(r as u64)) + (74_267_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - // Minimum execution time: 68_909 nanoseconds. - Weight::from_ref_time(69_845_981 as u64) - // Standard Error: 775 - .saturating_add(Weight::from_ref_time(1_868_722 as u64).saturating_mul(r as u64)) + (73_952_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - // Minimum execution time: 68_986 nanoseconds. - Weight::from_ref_time(69_683_189 as u64) - // Standard Error: 503 - .saturating_add(Weight::from_ref_time(1_884_715 as u64).saturating_mul(r as u64)) + (73_851_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_368_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ges(r: u32, ) -> Weight { - // Minimum execution time: 69_230 nanoseconds. - Weight::from_ref_time(69_765_336 as u64) - // Standard Error: 2_060 - .saturating_add(Weight::from_ref_time(1_871_848 as u64).saturating_mul(r as u64)) + (74_034_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - // Minimum execution time: 68_953 nanoseconds. - Weight::from_ref_time(69_828_265 as u64) - // Standard Error: 951 - .saturating_add(Weight::from_ref_time(1_868_596 as u64).saturating_mul(r as u64)) + (73_979_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - // Minimum execution time: 69_078 nanoseconds. - Weight::from_ref_time(69_832_768 as u64) - // Standard Error: 894 - .saturating_add(Weight::from_ref_time(1_845_786 as u64).saturating_mul(r as u64)) + (74_000_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_328_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - // Minimum execution time: 68_939 nanoseconds. - Weight::from_ref_time(69_676_256 as u64) - // Standard Error: 374 - .saturating_add(Weight::from_ref_time(1_851_026 as u64).saturating_mul(r as u64)) + (73_883_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_331_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - // Minimum execution time: 69_096 nanoseconds. 
- Weight::from_ref_time(69_914_159 as u64) - // Standard Error: 1_265 - .saturating_add(Weight::from_ref_time(1_844_489 as u64).saturating_mul(r as u64)) + (74_216_000 as Weight) + // Standard Error: 5_000 + .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - // Minimum execution time: 68_939 nanoseconds. - Weight::from_ref_time(69_641_768 as u64) - // Standard Error: 347 - .saturating_add(Weight::from_ref_time(2_488_628 as u64).saturating_mul(r as u64)) + (73_989_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_998_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - // Minimum execution time: 69_114 nanoseconds. - Weight::from_ref_time(69_844_395 as u64) - // Standard Error: 1_489 - .saturating_add(Weight::from_ref_time(2_456_310 as u64).saturating_mul(r as u64)) + (73_857_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - // Minimum execution time: 69_082 nanoseconds. - Weight::from_ref_time(69_993_662 as u64) - // Standard Error: 1_218 - .saturating_add(Weight::from_ref_time(2_524_010 as u64).saturating_mul(r as u64)) + (73_801_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_027_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - // Minimum execution time: 69_036 nanoseconds. - Weight::from_ref_time(70_095_304 as u64) - // Standard Error: 1_473 - .saturating_add(Weight::from_ref_time(2_429_659 as u64).saturating_mul(r as u64)) + (74_130_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_064_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64and(r: u32, ) -> Weight { - // Minimum execution time: 69_229 nanoseconds. - Weight::from_ref_time(69_759_818 as u64) - // Standard Error: 573 - .saturating_add(Weight::from_ref_time(1_879_670 as u64).saturating_mul(r as u64)) + (74_071_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - // Minimum execution time: 69_151 nanoseconds. - Weight::from_ref_time(69_865_948 as u64) - // Standard Error: 721 - .saturating_add(Weight::from_ref_time(1_846_734 as u64).saturating_mul(r as u64)) + (74_201_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_330_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - // Minimum execution time: 69_120 nanoseconds. - Weight::from_ref_time(70_135_849 as u64) - // Standard Error: 3_443 - .saturating_add(Weight::from_ref_time(1_841_784 as u64).saturating_mul(r as u64)) + (74_241_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_321_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - // Minimum execution time: 69_077 nanoseconds. - Weight::from_ref_time(69_929_746 as u64) - // Standard Error: 821 - .saturating_add(Weight::from_ref_time(1_866_348 as u64).saturating_mul(r as u64)) + (74_331_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_347_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - // Minimum execution time: 69_226 nanoseconds. 
- Weight::from_ref_time(69_725_630 as u64) - // Standard Error: 891 - .saturating_add(Weight::from_ref_time(1_873_637 as u64).saturating_mul(r as u64)) + (73_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - // Minimum execution time: 70_591 nanoseconds. - Weight::from_ref_time(69_939_773 as u64) - // Standard Error: 960 - .saturating_add(Weight::from_ref_time(1_867_208 as u64).saturating_mul(r as u64)) + (73_807_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - // Minimum execution time: 69_187 nanoseconds. - Weight::from_ref_time(69_845_516 as u64) - // Standard Error: 781 - .saturating_add(Weight::from_ref_time(1_869_613 as u64).saturating_mul(r as u64)) + (73_725_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - // Minimum execution time: 69_065 nanoseconds. 
- Weight::from_ref_time(69_950_430 as u64) - // Standard Error: 986 - .saturating_add(Weight::from_ref_time(1_867_001 as u64).saturating_mul(r as u64)) + (73_755_000 as Weight) + // Standard Error: 3_000 + .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) } } diff --git a/frame/conviction-voting/Cargo.toml b/frame/conviction-voting/Cargo.toml index 3c40017ece8e7..ab6d04d199bc6 100644 --- a/frame/conviction-voting/Cargo.toml +++ b/frame/conviction-voting/Cargo.toml @@ -36,7 +36,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", @@ -46,8 +46,7 @@ std = [ "sp-std/std", ] runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", + "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs index 117bb7fe22989..53ac7a07302f9 100644 --- a/frame/conviction-voting/src/benchmarking.rs +++ b/frame/conviction-voting/src/benchmarking.rs @@ -20,7 +20,7 @@ use super::*; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; +use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ dispatch::RawOrigin, traits::{fungible, Currency, Get}, @@ -34,9 +34,8 @@ const SEED: u32 = 0; /// Fill all classes as much as possible up to `MaxVotes` and return the Class with the most votes /// ongoing. 
-fn fill_voting, I: 'static>( -) -> (ClassOf, BTreeMap, Vec>>) { - let mut r = BTreeMap::, Vec>>::new(); +fn fill_voting() -> (ClassOf, BTreeMap, Vec>>) { + let mut r = BTreeMap::, Vec>>::new(); for class in T::Polls::classes().into_iter() { for _ in 0..T::MaxVotes::get() { match T::Polls::create_ongoing(class.clone()) { @@ -49,34 +48,34 @@ fn fill_voting, I: 'static>( (c, r) } -fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { +fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn account_vote, I: 'static>(b: BalanceOf) -> AccountVote> { +fn account_vote(b: BalanceOf) -> AccountVote> { let v = Vote { aye: true, conviction: Conviction::Locked1x }; AccountVote::Standard { vote: v, balance: b } } -benchmarks_instance_pallet! { +benchmarks! { where_clause { where T::MaxVotes: core::fmt::Debug } vote_new { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let account_vote = account_vote::(100u32.into()); + let account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len() - 1; // We need to create existing votes for i in polls.iter().skip(1) { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -86,52 +85,52 @@ benchmarks_instance_pallet! 
{ }: vote(RawOrigin::Signed(caller.clone()), index, account_vote) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == (r + 1) as usize ); } vote_existing { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r, "Votes were not recorded."); - let new_account_vote = account_vote::(200u32.into()); + let new_account_vote = account_vote::(200u32.into()); let index = polls[0]; }: vote(RawOrigin::Signed(caller.clone()), index, new_account_vote) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == r as usize ); } remove_vote { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -141,26 +140,25 @@ benchmarks_instance_pallet! { }: _(RawOrigin::Signed(caller.clone()), Some(class.clone()), index) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == (r - 1) as usize ); } remove_other_vote { - let caller = funded_account::("caller", 0); - let voter = funded_account::("caller", 0); - let voter_lookup = T::Lookup::unlookup(voter.clone()); + let caller = funded_account::("caller", 0); + let voter = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -168,10 +166,10 @@ benchmarks_instance_pallet! { let index = polls[0]; assert!(T::Polls::end_ongoing(index, false).is_ok()); - }: _(RawOrigin::Signed(caller.clone()), voter_lookup, class.clone(), index) + }: _(RawOrigin::Signed(caller.clone()), voter.clone(), class.clone(), index) verify { assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize ); } @@ -179,81 +177,78 @@ benchmarks_instance_pallet! { delegate { let r in 0 .. 
T::MaxVotes::get().min(T::Polls::max_ongoing().1); - let all_polls = fill_voting::().1; + let all_polls = fill_voting::().1; let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; - let voter = funded_account::("voter", 0); - let voter_lookup = T::Lookup::unlookup(voter.clone()); - let caller = funded_account::("caller", 0); + let voter = funded_account::("voter", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let delegated_balance: BalanceOf = 1000u32.into(); - let delegate_vote = account_vote::(delegated_balance); + let delegated_balance: BalanceOf = 1000u32.into(); + let delegate_vote = account_vote::(delegated_balance); // We need to create existing delegations for i in polls.iter().take(r as usize) { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; } assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize ); - }: _(RawOrigin::Signed(caller.clone()), class.clone(), voter_lookup, Conviction::Locked1x, delegated_balance) + }: _(RawOrigin::Signed(caller.clone()), class.clone(), voter, Conviction::Locked1x, delegated_balance) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); } undelegate { let r in 0 .. 
T::MaxVotes::get().min(T::Polls::max_ongoing().1); - let all_polls = fill_voting::().1; + let all_polls = fill_voting::().1; let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; - let voter = funded_account::("voter", 0); - let voter_lookup = T::Lookup::unlookup(voter.clone()); - let caller = funded_account::("caller", 0); + let voter = funded_account::("voter", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let delegated_balance: BalanceOf = 1000u32.into(); - let delegate_vote = account_vote::(delegated_balance); + let delegated_balance: BalanceOf = 1000u32.into(); + let delegate_vote = account_vote::(delegated_balance); - ConvictionVoting::::delegate( + ConvictionVoting::::delegate( RawOrigin::Signed(caller.clone()).into(), class.clone(), - voter_lookup, + voter.clone(), Conviction::Locked1x, delegated_balance, )?; // We need to create delegations for i in polls.iter().take(r as usize) { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; } assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == r as usize ); - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); }: _(RawOrigin::Signed(caller.clone()), class.clone()) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); } unlock { - let caller = funded_account::("caller", 0); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); - let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); + let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); + let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); // Fill everything up to the max by filling all classes with votes and voting on them all. - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); assert!(all_polls.len() > 0); for (class, polls) in all_polls.iter() { assert!(polls.len() > 0); for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; } } @@ -262,15 +257,15 @@ benchmarks_instance_pallet! { // Vote big on the class with the most ongoing votes of them to bump the lock and make it // hard to recompute when removed. 
- ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; let now_usable = >::reducible_balance(&caller, false); assert_eq!(orig_usable - now_usable, 100u32.into()); // Remove the vote - ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; + ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; // We can now unlock on `class` from 200 to 100... - }: _(RawOrigin::Signed(caller.clone()), class, caller_lookup) + }: _(RawOrigin::Signed(caller.clone()), class, caller.clone()) verify { assert_eq!(orig_usable, >::reducible_balance(&caller, false)); } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 3ecc6e56be94e..54fc1156d1f47 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -36,7 +36,7 @@ use frame_support::{ }, }; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero}, + traits::{AtLeast32BitUnsigned, Saturating, Zero}, ArithmeticError, Perbill, }; use sp_std::prelude::*; @@ -62,7 +62,6 @@ pub mod benchmarking; const CONVICTION_VOTING_ID: LockIdentifier = *b"pyconvot"; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; type VotingOf = Voting< @@ -98,8 +97,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { // System level stuff. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// Currency type with which voting happens. @@ -121,8 +119,8 @@ pub mod pallet { /// The maximum number of concurrent votes an account may have. 
/// - /// Also used to compute weight, an overly large value can lead to extrinsics with large - /// weight estimation: see `delegate` for instance. + /// Also used to compute weight, an overly large value can + /// lead to extrinsic with large weight estimation: see `delegate` for instance. #[pallet::constant] type MaxVotes: Get; @@ -247,12 +245,11 @@ pub mod pallet { pub fn delegate( origin: OriginFor, class: ClassOf, - to: AccountIdLookupOf, + to: T::AccountId, conviction: Conviction, balance: BalanceOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let to = T::Lookup::lookup(to)?; let votes = Self::try_delegate(who, class, to, conviction, balance)?; Ok(Some(T::WeightInfo::delegate(votes)).into()) @@ -261,7 +258,7 @@ pub mod pallet { /// Undelegate the voting power of the sending account for a particular class of polls. /// /// Tokens may be unlocked following once an amount of time consistent with the lock period - /// of the conviction with which the delegation was issued has passed. + /// of the conviction with which the delegation was issued. /// /// The dispatch origin of this call must be _Signed_ and the signing account must be /// currently delegating. @@ -284,7 +281,7 @@ pub mod pallet { Ok(Some(T::WeightInfo::undelegate(votes)).into()) } - /// Remove the lock caused by prior voting/delegating which has expired within a particular + /// Remove the lock caused prior voting/delegating which has expired within a particluar /// class. /// /// The dispatch origin of this call must be _Signed_. 
@@ -297,10 +294,9 @@ pub mod pallet { pub fn unlock( origin: OriginFor, class: ClassOf, - target: AccountIdLookupOf, + target: T::AccountId, ) -> DispatchResult { ensure_signed(origin)?; - let target = T::Lookup::lookup(target)?; Self::update_lock(&class, &target); Ok(()) } @@ -363,12 +359,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_other_vote())] pub fn remove_other_vote( origin: OriginFor, - target: AccountIdLookupOf, + target: T::AccountId, class: ClassOf, index: PollIndexOf, ) -> DispatchResult { let who = ensure_signed(origin)?; - let target = T::Lookup::lookup(target)?; let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; Self::try_remove_vote(&target, index, Some(class), scope)?; Ok(()) @@ -400,7 +395,7 @@ impl, I: 'static> Pallet { Err(i) => { votes .try_insert(i, (poll_index, vote)) - .map_err(|_| Error::::MaxVotesReached)?; + .map_err(|()| Error::::MaxVotesReached)?; }, } // Shouldn't be possible to fail, but we handle it gracefully. @@ -475,7 +470,7 @@ impl, I: 'static> Pallet { }) } - /// Return the number of votes for `who`. + /// Return the number of votes for `who` fn increase_upstream_delegation( who: &T::AccountId, class: &ClassOf, @@ -503,7 +498,7 @@ impl, I: 'static> Pallet { }) } - /// Return the number of votes for `who`. + /// Return the number of votes for `who` fn reduce_upstream_delegation( who: &T::AccountId, class: &ClassOf, diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index 7a3f80442014a..9eb7f679efca3 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -22,7 +22,6 @@ use std::collections::BTreeMap; use frame_support::{ assert_noop, assert_ok, parameter_types, traits::{ConstU32, ConstU64, Contains, Polling, VoteTally}, - weights::Weight, }; use sp_core::H256; use sp_runtime::{ @@ -50,31 +49,31 @@ frame_support::construct_runtime!( // Test that a fitlered call can be dispatched. 
pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &RuntimeCall) -> bool { - !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) +impl Contains for BaseFilter { + fn contains(call: &Call) -> bool { + !matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. })) } } parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); + frame_system::limits::BlockWeights::simple_max(1_000_000); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -92,7 +91,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type MaxLocks = ConstU32<10>; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -187,7 +186,7 @@ impl Polling> for TestPolls { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = pallet_balances::Pallet; type VoteLockingPeriod = ConstU64<3>; type MaxVotes = ConstU32<3>; @@ -269,28 +268,28 @@ fn basic_stuff() { #[test] fn basic_voting_works() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(2, 5))); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 
5))); + assert_ok!(Voting::vote(Origin::signed(1), 3, nay(2, 5))); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(5, 1))); assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 0))); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); + assert_ok!(Voting::remove_vote(Origin::signed(1), None, 3)); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); + assert_ok!(Voting::unlock(Origin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -298,28 +297,28 @@ fn basic_voting_works() { #[test] fn voting_balance_gets_locked() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(2, 5))); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5))); + assert_ok!(Voting::vote(Origin::signed(1), 3, nay(2, 5))); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(5, 1))); 
assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 0))); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); + assert_ok!(Voting::remove_vote(Origin::signed(1), None, 3)); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); + assert_ok!(Voting::unlock(Origin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -327,12 +326,12 @@ fn voting_balance_gets_locked() { #[test] fn successful_but_zero_conviction_vote_balance_can_be_unlocked() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(1, 1))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, nay(20, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(1, 1))); + assert_ok!(Voting::vote(Origin::signed(2), 3, nay(20, 0))); let c = class(3); Polls::set(vec![(3, Completed(3, false))].into_iter().collect()); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(2), Some(c), 3)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(2), c, 2)); + assert_ok!(Voting::remove_vote(Origin::signed(2), Some(c), 3)); + assert_ok!(Voting::unlock(Origin::signed(2), c, 2)); assert_eq!(Balances::usable_balance(2), 20); }); } @@ -340,12 +339,12 @@ fn successful_but_zero_conviction_vote_balance_can_be_unlocked() { #[test] fn 
unsuccessful_conviction_vote_balance_can_be_unlocked() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(1, 1))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, nay(20, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(1, 1))); + assert_ok!(Voting::vote(Origin::signed(2), 3, nay(20, 0))); let c = class(3); Polls::set(vec![(3, Completed(3, false))].into_iter().collect()); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(c), 3)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), c, 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(c), 3)); + assert_ok!(Voting::unlock(Origin::signed(1), c, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -354,17 +353,17 @@ fn unsuccessful_conviction_vote_balance_can_be_unlocked() { fn successful_conviction_vote_balance_stays_locked_for_correct_time() { new_test_ext().execute_with(|| { for i in 1..=5 { - assert_ok!(Voting::vote(RuntimeOrigin::signed(i), 3, aye(10, i as u8))); + assert_ok!(Voting::vote(Origin::signed(i), 3, aye(10, i as u8))); } let c = class(3); Polls::set(vec![(3, Completed(3, true))].into_iter().collect()); for i in 1..=5 { - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(i), Some(c), 3)); + assert_ok!(Voting::remove_vote(Origin::signed(i), Some(c), 3)); } for block in 1..=(3 + 5 * 3) { run_to(block); for i in 1..=5 { - assert_ok!(Voting::unlock(RuntimeOrigin::signed(i), c, i)); + assert_ok!(Voting::unlock(Origin::signed(i), c, i)); let expired = block >= (3 << (i - 1)) + 3; assert_eq!(Balances::usable_balance(i), i * 10 - if expired { 0 } else { 10 }); } @@ -385,20 +384,20 @@ fn classwise_delegation_works() { .into_iter() .collect(), ); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 3, Conviction::Locked1x, 5)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 4, Conviction::Locked1x, 5)); + 
assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(Origin::signed(1), 1, 3, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(Origin::signed(1), 2, 4, Conviction::Locked1x, 5)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 0, aye(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 1, nay(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 2, nay(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(3), 0, nay(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(3), 1, aye(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(3), 2, nay(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 0, nay(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 1, nay(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 2, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(2), 0, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(2), 1, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(2), 2, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(3), 0, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(3), 1, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(3), 2, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(4), 0, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(4), 1, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(4), 2, aye(10, 0))); // 4 hasn't voted yet assert_eq!( @@ -414,7 +413,7 @@ fn classwise_delegation_works() { ); // 4 votes nay to 3. - assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 3, nay(10, 0))); + assert_ok!(Voting::vote(Origin::signed(4), 3, nay(10, 0))); assert_eq!( Polls::get(), vec![ @@ -428,8 +427,8 @@ fn classwise_delegation_works() { ); // Redelegate for class 2 to account 3. 
- assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 3, Conviction::Locked1x, 5)); + assert_ok!(Voting::undelegate(Origin::signed(1), 2)); + assert_ok!(Voting::delegate(Origin::signed(1), 2, 3, Conviction::Locked1x, 5)); assert_eq!( Polls::get(), vec![ @@ -443,12 +442,12 @@ fn classwise_delegation_works() { ); // Redelegating with a lower lock does not forget previous lock and updates correctly. - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 1)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 3)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 3, Conviction::Locked1x, 3)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 4, Conviction::Locked1x, 3)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::undelegate(Origin::signed(1), 1)); + assert_ok!(Voting::undelegate(Origin::signed(1), 2)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 3)); + assert_ok!(Voting::delegate(Origin::signed(1), 1, 3, Conviction::Locked1x, 3)); + assert_ok!(Voting::delegate(Origin::signed(1), 2, 4, Conviction::Locked1x, 3)); assert_eq!( Polls::get(), vec![ @@ -462,24 +461,24 @@ fn classwise_delegation_works() { ); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); // unlock does nothing since the delegation already took place. assert_eq!(Balances::usable_balance(1), 5); // Redelegating with higher amount extends previous lock. 
- assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 6)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 6)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 4); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 1)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 3, Conviction::Locked1x, 7)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::undelegate(Origin::signed(1), 1)); + assert_ok!(Voting::delegate(Origin::signed(1), 1, 3, Conviction::Locked1x, 7)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); assert_eq!(Balances::usable_balance(1), 3); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 4, Conviction::Locked1x, 8)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::undelegate(Origin::signed(1), 2)); + assert_ok!(Voting::delegate(Origin::signed(1), 2, 4, Conviction::Locked1x, 8)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 2); assert_eq!( Polls::get(), @@ -499,14 +498,14 @@ fn classwise_delegation_works() { fn redelegation_after_vote_ending_should_keep_lock() { new_test_ext().execute_with(|| { Polls::set(vec![(0, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 0, aye(10, 1))); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::vote(Origin::signed(2), 0, aye(10, 1))); Polls::set(vec![(0, Completed(1, true))].into_iter().collect()); 
assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 3, Conviction::Locked1x, 3)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 3, Conviction::Locked1x, 3)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 5); }); } @@ -523,9 +522,9 @@ fn lock_amalgamation_valid_with_multiple_removed_votes() { .into_iter() .collect(), ); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(5, 1))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 1))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 2, aye(5, 2))); + assert_ok!(Voting::vote(Origin::signed(1), 0, aye(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 1, aye(10, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 2, aye(5, 2))); assert_eq!(Balances::usable_balance(1), 0); Polls::set( @@ -533,28 +532,28 @@ fn lock_amalgamation_valid_with_multiple_removed_votes() { .into_iter() .collect(), ); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 2)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), 
Some(0), 2)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(3); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -562,25 +561,25 @@ fn lock_amalgamation_valid_with_multiple_removed_votes() { #[test] fn lock_amalgamation_valid_with_multiple_delegations() { new_test_ext().execute_with(|| { - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 10)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked2x, 5)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 10)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked2x, 5)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); run_to(3); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); 
assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -589,32 +588,32 @@ fn lock_amalgamation_valid_with_multiple_delegations() { fn lock_amalgamation_valid_with_move_roundtrip_to_delegation() { new_test_ext().execute_with(|| { Polls::set(vec![(0, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 0, aye(5, 1))); Polls::set(vec![(0, Completed(1, true))].into_iter().collect()); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 10)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 10)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); Polls::set(vec![(1, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(5, 2))); + assert_ok!(Voting::vote(Origin::signed(1), 1, aye(5, 2))); Polls::set(vec![(1, Completed(1, true))].into_iter().collect()); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 1)); 
run_to(3); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -622,33 +621,33 @@ fn lock_amalgamation_valid_with_move_roundtrip_to_delegation() { #[test] fn lock_amalgamation_valid_with_move_roundtrip_to_casting() { new_test_ext().execute_with(|| { - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 5); Polls::set(vec![(0, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(10, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 0, aye(10, 1))); Polls::set(vec![(0, Completed(1, true))].into_iter().collect()); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked2x, 10)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked2x, 10)); + 
assert_ok!(Voting::undelegate(Origin::signed(1), 0)); run_to(3); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -656,30 +655,30 @@ fn lock_amalgamation_valid_with_move_roundtrip_to_casting() { #[test] fn lock_aggregation_over_different_classes_with_delegation_works() { new_test_ext().execute_with(|| { - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 2, Conviction::Locked2x, 5)); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 2, Conviction::Locked1x, 10)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(Origin::signed(1), 1, 2, Conviction::Locked2x, 5)); + assert_ok!(Voting::delegate(Origin::signed(1), 2, 2, Conviction::Locked1x, 10)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 1)); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::undelegate(Origin::signed(1), 1)); + assert_ok!(Voting::undelegate(Origin::signed(1), 2)); run_to(3); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 
1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 5); run_to(7); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -696,34 +695,34 @@ fn lock_aggregation_over_different_classes_with_casting_works() { .into_iter() .collect(), ); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(5, 1))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 1))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 2, aye(5, 2))); + assert_ok!(Voting::vote(Origin::signed(1), 0, aye(5, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 1, aye(10, 1))); + assert_ok!(Voting::vote(Origin::signed(1), 2, aye(5, 2))); Polls::set( vec![(0, Completed(1, true)), (1, Completed(1, true)), (2, Completed(1, true))] .into_iter() .collect(), ); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(1), 1)); - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(2), 2)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(1), 1)); + assert_ok!(Voting::remove_vote(Origin::signed(1), Some(2), 2)); run_to(3); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - 
assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 5); run_to(7); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -731,30 +730,21 @@ fn lock_aggregation_over_different_classes_with_casting_works() { #[test] fn errors_with_vote_work() { new_test_ext().execute_with(|| { + assert_noop!(Voting::vote(Origin::signed(1), 0, aye(10, 0)), Error::::NotOngoing); + assert_noop!(Voting::vote(Origin::signed(1), 1, aye(10, 0)), Error::::NotOngoing); + assert_noop!(Voting::vote(Origin::signed(1), 2, aye(10, 0)), Error::::NotOngoing); assert_noop!( - Voting::vote(RuntimeOrigin::signed(1), 0, aye(10, 0)), - Error::::NotOngoing - ); - assert_noop!( - Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 0)), - Error::::NotOngoing - ); - assert_noop!( - Voting::vote(RuntimeOrigin::signed(1), 2, aye(10, 0)), - Error::::NotOngoing - ); - assert_noop!( - Voting::vote(RuntimeOrigin::signed(1), 3, aye(11, 0)), + Voting::vote(Origin::signed(1), 3, aye(11, 0)), 
Error::::InsufficientFunds ); - assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::None, 10)); + assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::None, 10)); assert_noop!( - Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0)), + Voting::vote(Origin::signed(1), 3, aye(10, 0)), Error::::AlreadyDelegating ); - assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::undelegate(Origin::signed(1), 0)); Polls::set( vec![ (0, Ongoing(Tally::new(0), 0)), @@ -765,11 +755,11 @@ fn errors_with_vote_work() { .into_iter() .collect(), ); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 0))); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 2, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 0, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 1, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 2, aye(10, 0))); assert_noop!( - Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0)), + Voting::vote(Origin::signed(1), 3, aye(10, 0)), Error::::MaxVotesReached ); }); @@ -779,21 +769,21 @@ fn errors_with_vote_work() { fn errors_with_delegating_work() { new_test_ext().execute_with(|| { assert_noop!( - Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::None, 11), + Voting::delegate(Origin::signed(1), 0, 2, Conviction::None, 11), Error::::InsufficientFunds ); assert_noop!( - Voting::delegate(RuntimeOrigin::signed(1), 3, 2, Conviction::None, 10), + Voting::delegate(Origin::signed(1), 3, 2, Conviction::None, 10), Error::::BadClass ); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 0))); assert_noop!( - Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::None, 10), + Voting::delegate(Origin::signed(1), 0, 2, Conviction::None, 10), Error::::AlreadyVoting ); - assert_noop!(Voting::undelegate(RuntimeOrigin::signed(1), 0), 
Error::::NotDelegating); + assert_noop!(Voting::undelegate(Origin::signed(1), 0), Error::::NotDelegating); }); } @@ -801,37 +791,31 @@ fn errors_with_delegating_work() { fn remove_other_vote_works() { new_test_ext().execute_with(|| { assert_noop!( - Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3), + Voting::remove_other_vote(Origin::signed(2), 1, 0, 3), Error::::NotVoter ); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 2))); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 2))); assert_noop!( - Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3), + Voting::remove_other_vote(Origin::signed(2), 1, 0, 3), Error::::NoPermission ); Polls::set(vec![(3, Completed(1, true))].into_iter().collect()); run_to(6); assert_noop!( - Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3), + Voting::remove_other_vote(Origin::signed(2), 1, 0, 3), Error::::NoPermissionYet ); run_to(7); - assert_ok!(Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3)); + assert_ok!(Voting::remove_other_vote(Origin::signed(2), 1, 0, 3)); }); } #[test] fn errors_with_remove_vote_work() { new_test_ext().execute_with(|| { - assert_noop!( - Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 3), - Error::::NotVoter - ); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 2))); + assert_noop!(Voting::remove_vote(Origin::signed(1), Some(0), 3), Error::::NotVoter); + assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 2))); Polls::set(vec![(3, Completed(1, true))].into_iter().collect()); - assert_noop!( - Voting::remove_vote(RuntimeOrigin::signed(1), None, 3), - Error::::ClassNeeded - ); + assert_noop!(Voting::remove_vote(Origin::signed(1), None, 3), Error::::ClassNeeded); }); } diff --git a/frame/conviction-voting/src/types.rs b/frame/conviction-voting/src/types.rs index d6051dff62569..8f4f3697e9766 100644 --- a/frame/conviction-voting/src/types.rs +++ b/frame/conviction-voting/src/types.rs @@ -93,9 +93,6 @@ impl< let ayes = 
approval.mul_ceil(support); Self { ayes, nays: support - ayes, support, dummy: PhantomData } } - - #[cfg(feature = "runtime-benchmarks")] - fn setup(_: Class, _: Perbill) {} } impl< diff --git a/frame/conviction-voting/src/vote.rs b/frame/conviction-voting/src/vote.rs index a8e012b6c97a1..a0b17ab4de39d 100644 --- a/frame/conviction-voting/src/vote.rs +++ b/frame/conviction-voting/src/vote.rs @@ -233,7 +233,7 @@ where AsMut::>::as_mut(self).rejig(now); } - /// The amount of this account's balance that must currently be locked due to voting. + /// The amount of this account's balance that much currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { Voting::Casting(Casting { votes, prior, .. }) => diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index e50842449f88a..330d02755cb8b 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_conviction_voting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/conviction-voting/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/conviction-voting/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -65,10 +62,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - // Minimum execution time: 131_633 nanoseconds. - Weight::from_ref_time(132_742_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (148_804_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -76,66 +72,58 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - // Minimum execution time: 176_240 nanoseconds. - Weight::from_ref_time(183_274_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (313_333_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - // Minimum execution time: 158_880 nanoseconds. 
- Weight::from_ref_time(164_648_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (300_591_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - // Minimum execution time: 60_330 nanoseconds. - Weight::from_ref_time(61_588_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (53_887_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) - /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { - // Minimum execution time: 63_088 nanoseconds. 
- Weight::from_ref_time(67_803_536 as u64) - // Standard Error: 197_102 - .saturating_add(Weight::from_ref_time(31_557_563 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(r as u64))) + (51_518_000 as Weight) + // Standard Error: 83_000 + .saturating_add((27_235_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) - /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { - // Minimum execution time: 45_150 nanoseconds. 
- Weight::from_ref_time(51_547_530 as u64) - // Standard Error: 771_127 - .saturating_add(Weight::from_ref_time(26_927_969 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(r as u64))) + (37_885_000 as Weight) + // Standard Error: 75_000 + .saturating_add((24_395_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - // Minimum execution time: 75_067 nanoseconds. - Weight::from_ref_time(76_888_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (67_703_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } } @@ -147,10 +135,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - // Minimum execution time: 131_633 nanoseconds. 
- Weight::from_ref_time(132_742_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (148_804_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -158,65 +145,57 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - // Minimum execution time: 176_240 nanoseconds. - Weight::from_ref_time(183_274_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (313_333_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - // Minimum execution time: 158_880 nanoseconds. - Weight::from_ref_time(164_648_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (300_591_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - // Minimum execution time: 60_330 nanoseconds. 
- Weight::from_ref_time(61_588_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (53_887_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) - /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { - // Minimum execution time: 63_088 nanoseconds. - Weight::from_ref_time(67_803_536 as u64) - // Standard Error: 197_102 - .saturating_add(Weight::from_ref_time(31_557_563 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(r as u64))) + (51_518_000 as Weight) + // Standard Error: 83_000 + .saturating_add((27_235_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) - /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { - // Minimum execution time: 45_150 nanoseconds. 
- Weight::from_ref_time(51_547_530 as u64) - // Standard Error: 771_127 - .saturating_add(Weight::from_ref_time(26_927_969 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(r as u64))) + (37_885_000 as Weight) + // Standard Error: 75_000 + .saturating_add((24_395_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - // Minimum execution time: 75_067 nanoseconds. 
- Weight::from_ref_time(76_888_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (67_703_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index e50d39ff76902..e0b85ed7d18df 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -24,19 +24,17 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } -log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-scheduler = { version = "4.0.0-dev", path = "../scheduler" } -pallet-preimage = { version = "4.0.0-dev", path = "../preimage" } +sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", @@ -44,7 +42,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", - "sp-core/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -52,4 +49,4 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime",] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 424192e2521da..c51fde8a3de8b 100644 --- a/frame/democracy/src/benchmarking.rs +++ 
b/frame/democracy/src/benchmarking.rs @@ -22,51 +22,62 @@ use super::*; use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ assert_noop, assert_ok, - traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, + codec::Decode, + traits::{ + schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, + }, }; -use frame_system::RawOrigin; -use sp_core::H256; -use sp_runtime::{traits::Bounded, BoundedVec}; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::{BadOrigin, Bounded, One}; use crate::Pallet as Democracy; -const REFERENDUM_COUNT_HINT: u32 = 10; const SEED: u32 = 0; +const MAX_REFERENDUMS: u32 = 99; +const MAX_SECONDERS: u32 = 100; +const MAX_BYTES: u32 = 16_384; + +fn assert_last_event(generic_event: ::Event) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - // Give the account half of the maximum value of the `Balance` type. - // Otherwise some transfers will fail with an overflow error. 
- T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn make_proposal(n: u32) -> BoundedCallOf { - let call: CallOf = frame_system::Call::remark { remark: n.encode() }.into(); - ::Preimages::bound(call).unwrap() -} - -fn add_proposal(n: u32) -> Result { +fn add_proposal(n: u32) -> Result { let other = funded_account::("proposer", n); let value = T::MinimumDeposit::get(); - let proposal = make_proposal::(n); - Democracy::::propose(RawOrigin::Signed(other).into(), proposal.clone(), value)?; - Ok(proposal.hash()) + let proposal_hash: T::Hash = T::Hashing::hash_of(&n); + + Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value)?; + + Ok(proposal_hash) } -fn add_referendum(n: u32) -> (ReferendumIndex, H256) { +fn add_referendum(n: u32) -> Result { + let proposal_hash: T::Hash = T::Hashing::hash_of(&n); let vote_threshold = VoteThreshold::SimpleMajority; - let proposal = make_proposal::(n); - let hash = proposal.hash(); - ( - Democracy::::inject_referendum( - T::LaunchPeriod::get(), - proposal, - vote_threshold, - 0u32.into(), - ), - hash, + + Democracy::::inject_referendum( + T::LaunchPeriod::get(), + proposal_hash, + vote_threshold, + 0u32.into(), + ); + let referendum_index: ReferendumIndex = ReferendumCount::::get() - 1; + T::Scheduler::schedule_named( + (DEMOCRACY_ID, referendum_index).encode(), + DispatchTime::At(2u32.into()), + None, + 63, + frame_system::RawOrigin::Root.into(), + Call::enact_proposal { proposal_hash, index: referendum_index }.into(), ) + .map_err(|_| "failed to schedule named")?; + Ok(referendum_index) } fn account_vote(b: BalanceOf) -> AccountVote> { @@ -84,90 +95,95 @@ benchmarks! 
{ } let caller = funded_account::("caller", 0); - let proposal = make_proposal::(0); + let proposal_hash: T::Hash = T::Hashing::hash_of(&0); let value = T::MinimumDeposit::get(); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal, value) + }: _(RawOrigin::Signed(caller), proposal_hash, value) verify { assert_eq!(Democracy::::public_props().len(), p as usize, "Proposals not created."); } second { + let s in 0 .. MAX_SECONDERS; + let caller = funded_account::("caller", 0); - add_proposal::(0)?; + let proposal_hash = add_proposal::(s)?; // Create s existing "seconds" - // we must reserve one deposit for the `proposal` and one for our benchmarked `second` call. - for i in 0 .. T::MaxDeposits::get() - 2 { + for i in 0 .. s { let seconder = funded_account::("seconder", i); - Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; + Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::MAX)?; } let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded"); + assert_eq!(deposits.0.len(), (s + 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), 0) + }: _(RawOrigin::Signed(caller), 0, u32::MAX) verify { let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work"); + assert_eq!(deposits.0.len(), (s + 2) as usize, "`second` benchmark did not work"); } vote_new { + let r in 1 .. MAX_REFERENDUMS; + let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0 .. T::MaxVotes::get() - 1 { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + for i in 0 .. 
r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (T::MaxVotes::get() - 1) as usize, "Votes were not recorded."); + assert_eq!(votes.len(), r as usize, "Votes were not recorded."); - let ref_index = add_referendum::(T::MaxVotes::get() - 1).0; + let referendum_index = add_referendum::(r)?; whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote) + }: vote(RawOrigin::Signed(caller.clone()), referendum_index, account_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was not recorded."); + assert_eq!(votes.len(), (r + 1) as usize, "Vote was not recorded."); } vote_existing { + let r in 1 .. MAX_REFERENDUMS; + let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0..T::MaxVotes::get() { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + for i in 0 ..=r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Votes were not recorded."); + assert_eq!(votes.len(), (r + 1) as usize, "Votes were not recorded."); // Change vote from aye to nay let nay = Vote { aye: false, conviction: Conviction::Locked1x }; let new_vote = AccountVote::Standard { vote: nay, balance: 1000u32.into() }; - let ref_index = Democracy::::referendum_count() - 1; + let referendum_index = Democracy::::referendum_count() - 1; // This tests when a user changes a vote whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote) + }: vote(RawOrigin::Signed(caller.clone()), referendum_index, new_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); - let referendum_info = Democracy::::referendum_info(ref_index) + assert_eq!(votes.len(), (r + 1) as usize, "Vote was incorrectly added"); + let referendum_info = Democracy::::referendum_info(referendum_index) .ok_or("referendum doesn't exist")?; let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, @@ -178,55 +194,61 @@ benchmarks! { emergency_cancel { let origin = T::CancellationOrigin::successful_origin(); - let ref_index = add_referendum::(0).0; - assert_ok!(Democracy::::referendum_status(ref_index)); - }: _(origin, ref_index) + let referendum_index = add_referendum::(0)?; + assert_ok!(Democracy::::referendum_status(referendum_index)); + }: _(origin, referendum_index) verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(ref_index), + Democracy::::referendum_status(referendum_index), Error::::ReferendumInvalid, ); } blacklist { + let p in 1 .. T::MaxProposals::get(); + // Place our proposal at the end to make sure it's worst case. - for i in 0 .. 
T::MaxProposals::get() - 1 { + for i in 0 .. p - 1 { add_proposal::(i)?; } // We should really add a lot of seconds here, but we're not doing it elsewhere. - // Add a referendum of our proposal. - let (ref_index, hash) = add_referendum::(0); - assert_ok!(Democracy::::referendum_status(ref_index)); // Place our proposal in the external queue, too. + let hash = T::Hashing::hash_of(&0); assert_ok!( - Democracy::::external_propose(T::ExternalOrigin::successful_origin(), make_proposal::(0)) + Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash) ); let origin = T::BlacklistOrigin::successful_origin(); - }: _(origin, hash, Some(ref_index)) + // Add a referendum of our proposal. + let referendum_index = add_referendum::(0)?; + assert_ok!(Democracy::::referendum_status(referendum_index)); + }: _(origin, hash, Some(referendum_index)) verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(ref_index), + Democracy::::referendum_status(referendum_index), Error::::ReferendumInvalid ); } // Worst case scenario, we external propose a previously blacklisted proposal external_propose { + let v in 1 .. MAX_VETOERS as u32; + let origin = T::ExternalOrigin::successful_origin(); - let proposal = make_proposal::(0); + let proposal_hash = T::Hashing::hash_of(&0); // Add proposal to blacklist with block number 0 - let addresses: BoundedVec<_, _> = (0..(T::MaxBlacklisted::get() - 1)) + let addresses = (0..v) .into_iter() .map(|i| account::("blacklist", i, SEED)) - .collect::>() - .try_into() - .unwrap(); - Blacklist::::insert(proposal.hash(), (T::BlockNumber::zero(), addresses)); - }: _(origin, proposal) + .collect::>(); + Blacklist::::insert( + proposal_hash, + (T::BlockNumber::zero(), addresses), + ); + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -234,8 +256,8 @@ benchmarks! 
{ external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal = make_proposal::(0); - }: _(origin, proposal) + let proposal_hash = T::Hashing::hash_of(&0); + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -243,8 +265,8 @@ benchmarks! { external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); - let proposal = make_proposal::(0); - }: _(origin, proposal) + let proposal_hash = T::Hashing::hash_of(&0); + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -252,60 +274,74 @@ benchmarks! { fast_track { let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - let proposal = make_proposal::(0); - let proposal_hash = proposal.hash(); - Democracy::::external_propose_default(origin_propose, proposal)?; + let proposal_hash: T::Hash = T::Hashing::hash_of(&0); + Democracy::::external_propose_default(origin_propose, proposal_hash)?; // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. let origin_fast_track = T::FastTrackOrigin::successful_origin(); let voting_period = T::FastTrackVotingPeriod::get(); let delay = 0u32; - }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) + }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) verify { assert_eq!(Democracy::::referendum_count(), 1, "referendum not created") } veto_external { - let proposal = make_proposal::(0); - let proposal_hash = proposal.hash(); + // Existing veto-ers + let v in 0 .. MAX_VETOERS as u32; + + let proposal_hash: T::Hash = T::Hashing::hash_of(&v); let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - Democracy::::external_propose_default(origin_propose, proposal)?; + Democracy::::external_propose_default(origin_propose, proposal_hash)?; - let mut vetoers: BoundedVec = Default::default(); - for i in 0 .. 
(T::MaxBlacklisted::get() - 1) { - vetoers.try_push(account::("vetoer", i, SEED)).unwrap(); + let mut vetoers: Vec = Vec::new(); + for i in 0 .. v { + vetoers.push(account::("vetoer", i, SEED)); } vetoers.sort(); Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); let origin = T::VetoOrigin::successful_origin(); ensure!(NextExternal::::get().is_some(), "no external proposal"); - }: _(origin, proposal_hash) + }: _(origin, proposal_hash) verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; - assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); + assert_eq!(new_vetoers.len(), (v + 1) as usize, "vetoers not added"); } cancel_proposal { + let p in 1 .. T::MaxProposals::get(); + // Place our proposal at the end to make sure it's worst case. - for i in 0 .. T::MaxProposals::get() { + for i in 0 .. p { add_proposal::(i)?; } - let cancel_origin = T::CancelProposalOrigin::successful_origin(); - }: _(cancel_origin, 0) + }: _(RawOrigin::Root, 0) cancel_referendum { - let ref_index = add_referendum::(0).0; - }: _(RawOrigin::Root, ref_index) + let referendum_index = add_referendum::(0)?; + }: _(RawOrigin::Root, referendum_index) + cancel_queued { + let r in 1 .. MAX_REFERENDUMS; + + for i in 0..r { + add_referendum::(i)?; // This add one element in the scheduler + } + + let referendum_index = add_referendum::(r)?; + }: _(RawOrigin::Root, referendum_index) + + // This measures the path of `launch_next` external. Not currently used as we simply + // assume the weight is `MaxBlockWeight` when executing. #[extra] on_initialize_external { - let r in 0 .. REFERENDUM_COUNT_HINT; + let r in 0 .. MAX_REFERENDUMS; for i in 0..r { - add_referendum::(i); + add_referendum::(i)?; } assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); @@ -314,8 +350,8 @@ benchmarks! 
{ LastTabledWasExternal::::put(false); let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal = make_proposal::(r); - let call = Call::::external_propose_majority { proposal }; + let proposal_hash = T::Hashing::hash_of(&r); + let call = Call::::external_propose_majority { proposal_hash }; call.dispatch_bypass_filter(origin)?; // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -339,12 +375,14 @@ benchmarks! { } } + // This measures the path of `launch_next` public. Not currently used as we simply + // assume the weight is `MaxBlockWeight` when executing. #[extra] on_initialize_public { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; for i in 0..r { - add_referendum::(i); + add_referendum::(i)?; } assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); @@ -373,10 +411,10 @@ benchmarks! { // No launch no maturing referenda. on_initialize_base { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; for i in 0..r { - add_referendum::(i); + add_referendum::(i)?; } for (key, mut info) in ReferendumInfoOf::::iter() { @@ -403,10 +441,10 @@ benchmarks! { } on_initialize_base_with_launch_period { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; for i in 0..r { - add_referendum::(i); + add_referendum::(i)?; } for (key, mut info) in ReferendumInfoOf::::iter() { @@ -435,7 +473,7 @@ benchmarks! { } delegate { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -443,10 +481,9 @@ benchmarks! 
{ let caller = funded_account::("caller", 0); // Caller will initially delegate to `old_delegate` let old_delegate: T::AccountId = funded_account::("old_delegate", r); - let old_delegate_lookup = T::Lookup::unlookup(old_delegate.clone()); Democracy::::delegate( RawOrigin::Signed(caller.clone()).into(), - old_delegate_lookup, + old_delegate.clone(), Conviction::Locked1x, delegated_balance, )?; @@ -458,12 +495,11 @@ benchmarks! { assert_eq!(balance, delegated_balance, "delegation balance didn't work"); // Caller will now switch to `new_delegate` let new_delegate: T::AccountId = funded_account::("new_delegate", r); - let new_delegate_lookup = T::Lookup::unlookup(new_delegate.clone()); let account_vote = account_vote::(initial_balance); // We need to create existing direct votes for the `new_delegate` for i in 0..r { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_index, account_vote)?; + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_idx, account_vote)?; } let votes = match VotingOf::::get(&new_delegate) { Voting::Direct { votes, .. } => votes, @@ -471,7 +507,7 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), new_delegate_lookup, Conviction::Locked1x, delegated_balance) + }: _(RawOrigin::Signed(caller.clone()), new_delegate.clone(), Conviction::Locked1x, delegated_balance) verify { let (target, balance) = match VotingOf::::get(&caller) { Voting::Delegating { target, balance, .. } => (target, balance), @@ -487,7 +523,7 @@ benchmarks! { } undelegate { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -495,10 +531,9 @@ benchmarks! 
{ let caller = funded_account::("caller", 0); // Caller will delegate let the_delegate: T::AccountId = funded_account::("delegate", r); - let the_delegate_lookup = T::Lookup::unlookup(the_delegate.clone()); Democracy::::delegate( RawOrigin::Signed(caller.clone()).into(), - the_delegate_lookup, + the_delegate.clone(), Conviction::Locked1x, delegated_balance, )?; @@ -511,10 +546,10 @@ benchmarks! { // We need to create votes direct votes for the `delegate` let account_vote = account_vote::(initial_balance); for i in 0..r { - let ref_index = add_referendum::(i).0; + let ref_idx = add_referendum::(i)?; Democracy::::vote( RawOrigin::Signed(the_delegate.clone()).into(), - ref_index, + ref_idx, account_vote )?; } @@ -538,25 +573,86 @@ benchmarks! { }: _(RawOrigin::Root) + note_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + let caller = funded_account::("caller", 0); + let encoded_proposal = vec![1; b as usize]; + whitelist_account!(caller); + }: _(RawOrigin::Signed(caller), encoded_proposal.clone()) + verify { + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + match Preimages::::get(proposal_hash) { + Some(PreimageStatus::Available { .. }) => (), + _ => return Err("preimage not available".into()) + } + } + + note_imminent_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + // d + 1 to include the one we are testing + let encoded_proposal = vec![1; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + let block_number = T::BlockNumber::one(); + Preimages::::insert(&proposal_hash, PreimageStatus::Missing(block_number)); + + let caller = funded_account::("caller", 0); + let encoded_proposal = vec![1; b as usize]; + whitelist_account!(caller); + }: _(RawOrigin::Signed(caller), encoded_proposal.clone()) + verify { + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + match Preimages::::get(proposal_hash) { + Some(PreimageStatus::Available { .. 
}) => (), + _ => return Err("preimage not available".into()) + } + } + + reap_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + let encoded_proposal = vec![1; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + + let submitter = funded_account::("submitter", b); + Democracy::::note_preimage(RawOrigin::Signed(submitter).into(), encoded_proposal.clone())?; + + // We need to set this otherwise we get `Early` error. + let block_number = T::VotingPeriod::get() + T::EnactmentPeriod::get() + T::BlockNumber::one(); + System::::set_block_number(block_number); + + assert!(Preimages::::contains_key(proposal_hash)); + + let caller = funded_account::("caller", 0); + whitelist_account!(caller); + }: _(RawOrigin::Signed(caller), proposal_hash, u32::MAX) + verify { + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + assert!(!Preimages::::contains_key(proposal_hash)); + } + // Test when unlock will remove locks unlock_remove { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; let locker = funded_account::("locker", 0); - let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); // Vote and immediately unvote for i in 0 .. 
r { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; - Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_idx, small_vote)?; + Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_idx)?; } let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: unlock(RawOrigin::Signed(caller), locker_lookup) + }: unlock(RawOrigin::Signed(caller), locker.clone()) verify { // Note that we may want to add a `get_lock` api to actually verify let voting = VotingOf::::get(&locker); @@ -565,22 +661,21 @@ benchmarks! { // Test when unlock will set a new value unlock_set { - let r in 0 .. (T::MaxVotes::get() - 1); + let r in 1 .. MAX_REFERENDUMS; let locker = funded_account::("locker", 0); - let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); for i in 0 .. r { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_idx, small_vote)?; } // Create a big vote so lock increases let big_vote = account_vote::(base_balance * 10u32.into()); - let ref_index = add_referendum::(r).0; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, big_vote)?; + let referendum_index = add_referendum::(r)?; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), referendum_index, big_vote)?; let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, @@ -591,11 +686,11 @@ benchmarks! 
{ let voting = VotingOf::::get(&locker); assert_eq!(voting.locked_balance(), base_balance * 10u32.into()); - Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; + Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), referendum_index)?; let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: unlock(RawOrigin::Signed(caller), locker_lookup) + }: unlock(RawOrigin::Signed(caller), locker.clone()) verify { let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, @@ -605,18 +700,18 @@ benchmarks! { let voting = VotingOf::::get(&locker); // Note that we may want to add a `get_lock` api to actually verify - assert_eq!(voting.locked_balance(), if r > 0 { base_balance } else { 0u32.into() }); + assert_eq!(voting.locked_balance(), base_balance); } remove_vote { - let r in 1 .. T::MaxVotes::get(); + let r in 1 .. MAX_REFERENDUMS; let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -625,9 +720,9 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes not created"); - let ref_index = r - 1; + let referendum_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), ref_index) + }: _(RawOrigin::Signed(caller.clone()), referendum_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -638,15 +733,14 @@ benchmarks! { // Worst case is when target == caller and referendum is ongoing remove_other_vote { - let r in 1 .. T::MaxVotes::get(); + let r in 1 .. 
MAX_REFERENDUMS; let caller = funded_account::("caller", r); - let caller_lookup = T::Lookup::unlookup(caller.clone()); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { - let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -655,9 +749,9 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes not created"); - let ref_index = r - 1; + let referendum_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index) + }: _(RawOrigin::Signed(caller.clone()), caller.clone(), referendum_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -666,6 +760,54 @@ benchmarks! { assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } + #[extra] + enact_proposal_execute { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + let proposer = funded_account::("proposer", 0); + let raw_call = Call::note_preimage { encoded_proposal: vec![1; b as usize] }; + let generic_call: T::Proposal = raw_call.into(); + let encoded_proposal = generic_call.encode(); + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + Democracy::::note_preimage(RawOrigin::Signed(proposer).into(), encoded_proposal)?; + + match Preimages::::get(proposal_hash) { + Some(PreimageStatus::Available { .. }) => (), + _ => return Err("preimage not available".into()) + } + }: enact_proposal(RawOrigin::Root, proposal_hash, 0) + verify { + // Fails due to mismatched origin + assert_last_event::(Event::::Executed { ref_index: 0, result: Err(BadOrigin.into()) }.into()); + } + + #[extra] + enact_proposal_slash { + // Num of bytes in encoded proposal + let b in 0 .. 
MAX_BYTES; + + let proposer = funded_account::("proposer", 0); + // Random invalid bytes + let encoded_proposal = vec![200; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + Democracy::::note_preimage(RawOrigin::Signed(proposer).into(), encoded_proposal)?; + + match Preimages::::get(proposal_hash) { + Some(PreimageStatus::Available { .. }) => (), + _ => return Err("preimage not available".into()) + } + let origin = RawOrigin::Root.into(); + let call = Call::::enact_proposal { proposal_hash, index: 0 }.encode(); + }: { + assert_eq!( + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin), + Err(Error::::PreimageInvalid.into()) + ); + } + impl_benchmark_test_suite!( Democracy, crate::tests::new_test_ext(), diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index a938d8a4e6852..57d631e8c1f4c 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -18,7 +18,7 @@ //! The conviction datatype. use crate::types::Delegations; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Bounded, CheckedDiv, CheckedMul, Zero}, @@ -27,19 +27,7 @@ use sp_runtime::{ use sp_std::{prelude::*, result::Result}; /// A value denoting the strength of conviction of a vote. -#[derive( - Encode, - MaxEncodedLen, - Decode, - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - RuntimeDebug, - TypeInfo, -)] +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo)] pub enum Conviction { /// 0.1x votes, unlocked. 
None, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index cf954d4800eee..443b8579116d0 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -152,20 +152,21 @@ #![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode}; +use codec::{Decode, Encode, Input}; use frame_support::{ ensure, traits::{ defensive_prelude::*, - schedule::{v3::Named as ScheduleNamed, DispatchTime}, - Bounded, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, QueryPreimage, - ReservableCurrency, StorePreimage, WithdrawReasons, + schedule::{DispatchTime, Named as ScheduleNamed}, + BalanceStatus, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, + ReservableCurrency, WithdrawReasons, }, weights::Weight, }; +use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded as ArithBounded, One, Saturating, StaticLookup, Zero}, - ArithmeticError, DispatchError, DispatchResult, + traits::{Bounded, Dispatchable, Hash, Saturating, Zero}, + ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, }; use sp_std::prelude::*; @@ -187,10 +188,13 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -pub mod migrations; - const DEMOCRACY_ID: LockIdentifier = *b"democrac"; +/// The maximum number of vetoers on a single proposal used to compute Weight. +/// +/// NOTE: This is not enforced by any logic. +pub const MAX_VETOERS: u32 = 100; + /// A proposal index. pub type PropIndex = u32; @@ -202,35 +206,54 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; -pub type CallOf = ::RuntimeCall; -pub type BoundedCallOf = Bounded>; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum PreimageStatus { + /// The preimage is imminently needed at the argument. + Missing(BlockNumber), + /// The preimage is available. 
+ Available { + data: Vec, + provider: AccountId, + deposit: Balance, + since: BlockNumber, + /// None if it's not imminent. + expiry: Option, + }, +} + +impl PreimageStatus { + fn to_missing_expiry(self) -> Option { + match self { + PreimageStatus::Missing(expiry) => Some(expiry), + _ => None, + } + } +} + +// A value placed in storage that represents the current version of the Democracy storage. +// This value is used by the `on_runtime_upgrade` logic to determine whether we run +// storage migration logic. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)] +enum Releases { + V1, +} #[frame_support::pallet] pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use sp_core::H256; - - /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config + Sized { - type WeightInfo: WeightInfo; - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// The Scheduler. - type Scheduler: ScheduleNamed, Self::PalletsOrigin>; - - /// The Preimage provider. - type Preimages: QueryPreimage + StorePreimage; + type Proposal: Parameter + Dispatchable + From>; + type Event: From> + IsType<::Event>; /// Currency type for this pallet. type Currency: ReservableCurrency @@ -263,104 +286,121 @@ pub mod pallet { #[pallet::constant] type MinimumDeposit: Get>; - /// Indicator for whether an emergency origin is even allowed to happen. Some chains may - /// want to set this permanently to `false`, others may want to condition it on things such - /// as an upgrade having happened recently. - #[pallet::constant] - type InstantAllowed: Get; - - /// Minimum voting period allowed for a fast-track referendum. 
- #[pallet::constant] - type FastTrackVotingPeriod: Get; - - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - #[pallet::constant] - type CooloffPeriod: Get; - - /// The maximum number of votes for an account. - /// - /// Also used to compute weight, an overly big value can - /// lead to extrinsic with very big weight: see `delegate` for instance. - #[pallet::constant] - type MaxVotes: Get; - - /// The maximum number of public proposals that can exist at any time. - #[pallet::constant] - type MaxProposals: Get; - - /// The maximum number of deposits a public proposal may have at any time. - #[pallet::constant] - type MaxDeposits: Get; - - /// The maximum number of items which can be blacklisted. - #[pallet::constant] - type MaxBlacklisted: Get; - /// Origin from which the next tabled referendum may be forced. This is a normal /// "super-majority-required" referendum. - type ExternalOrigin: EnsureOrigin; + type ExternalOrigin: EnsureOrigin; /// Origin from which the next tabled referendum may be forced; this allows for the tabling /// of a majority-carries referendum. - type ExternalMajorityOrigin: EnsureOrigin; + type ExternalMajorityOrigin: EnsureOrigin; /// Origin from which the next tabled referendum may be forced; this allows for the tabling /// of a negative-turnout-bias (default-carries) referendum. - type ExternalDefaultOrigin: EnsureOrigin; + type ExternalDefaultOrigin: EnsureOrigin; /// Origin from which the next majority-carries (or more permissive) referendum may be /// tabled to vote according to the `FastTrackVotingPeriod` asynchronously in a similar /// manner to the emergency origin. It retains its threshold method. - type FastTrackOrigin: EnsureOrigin; + type FastTrackOrigin: EnsureOrigin; /// Origin from which the next majority-carries (or more permissive) referendum may be /// tabled to vote immediately and asynchronously in a similar manner to the emergency /// origin. It retains its threshold method. 
- type InstantOrigin: EnsureOrigin; + type InstantOrigin: EnsureOrigin; + + /// Indicator for whether an emergency origin is even allowed to happen. Some chains may + /// want to set this permanently to `false`, others may want to condition it on things such + /// as an upgrade having happened recently. + #[pallet::constant] + type InstantAllowed: Get; + + /// Minimum voting period allowed for a fast-track referendum. + #[pallet::constant] + type FastTrackVotingPeriod: Get; /// Origin from which any referendum may be cancelled in an emergency. - type CancellationOrigin: EnsureOrigin; + type CancellationOrigin: EnsureOrigin; /// Origin from which proposals may be blacklisted. - type BlacklistOrigin: EnsureOrigin; + type BlacklistOrigin: EnsureOrigin; /// Origin from which a proposal may be cancelled and its backers slashed. - type CancelProposalOrigin: EnsureOrigin; + type CancelProposalOrigin: EnsureOrigin; /// Origin for anyone able to veto proposals. - type VetoOrigin: EnsureOrigin; + /// + /// # Warning + /// + /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to + /// [MAX_VETOERS](./const.MAX_VETOERS.html) + type VetoOrigin: EnsureOrigin; - /// Overarching type of all pallets origins. - type PalletsOrigin: From>; + /// Period in blocks where an external proposal may not be re-submitted after being vetoed. + #[pallet::constant] + type CooloffPeriod: Get; + + /// The amount of balance that must be deposited per byte of preimage stored. + #[pallet::constant] + type PreimageByteDeposit: Get>; + + /// An origin that can provide a preimage using operational extrinsics. + type OperationalPreimageOrigin: EnsureOrigin; /// Handler for the unbalanced reduction when slashing a preimage deposit. type Slash: OnUnbalanced>; + + /// The Scheduler. + type Scheduler: ScheduleNamed; + + /// Overarching type of all pallets origins. + type PalletsOrigin: From>; + + /// The maximum number of votes for an account. 
+ /// + /// Also used to compute weight, an overly big value can + /// lead to extrinsic with very big weight: see `delegate` for instance. + #[pallet::constant] + type MaxVotes: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// The maximum number of public proposals that can exist at any time. + #[pallet::constant] + type MaxProposals: Get; } + // TODO: Refactor public proposal queue into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 /// The number of (public) proposals that have been made so far. #[pallet::storage] #[pallet::getter(fn public_prop_count)] pub type PublicPropCount = StorageValue<_, PropIndex, ValueQuery>; - /// The public proposals. Unsorted. The second item is the proposal. + /// The public proposals. Unsorted. The second item is the proposal's hash. #[pallet::storage] #[pallet::getter(fn public_props)] - pub type PublicProps = StorageValue< - _, - BoundedVec<(PropIndex, BoundedCallOf, T::AccountId), T::MaxProposals>, - ValueQuery, - >; + pub type PublicProps = + StorageValue<_, Vec<(PropIndex, T::Hash, T::AccountId)>, ValueQuery>; /// Those who have locked a deposit. /// /// TWOX-NOTE: Safe, as increasing integer keys are safe. #[pallet::storage] #[pallet::getter(fn deposit_of)] - pub type DepositOf = StorageMap< + pub type DepositOf = + StorageMap<_, Twox64Concat, PropIndex, (Vec, BalanceOf)>; + + /// Map of hashes to the proposal preimage, along with who registered it and their deposit. + /// The block number is the block at which it was deposited. + // TODO: Refactor Preimages into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + #[pallet::storage] + pub type Preimages = StorageMap< _, - Twox64Concat, - PropIndex, - (BoundedVec, BalanceOf), + Identity, + T::Hash, + PreimageStatus, T::BlockNumber>, >; /// The next free referendum index, aka the number of referenda started so far. 
@@ -383,7 +423,7 @@ pub mod pallet { _, Twox64Concat, ReferendumIndex, - ReferendumInfo, BalanceOf>, + ReferendumInfo>, >; /// All votes for a particular voter. We store the balance for the number of votes that we @@ -395,12 +435,14 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - Voting, T::AccountId, T::BlockNumber, T::MaxVotes>, + Voting, T::AccountId, T::BlockNumber>, ValueQuery, >; /// True if the last referendum tabled was submitted externally. False if it was a public /// proposal. + // TODO: There should be any number of tabling origins, not just public and "external" + // (council). https://github.com/paritytech/substrate/issues/5322 #[pallet::storage] pub type LastTabledWasExternal = StorageValue<_, bool, ValueQuery>; @@ -409,21 +451,23 @@ pub mod pallet { /// - `LastTabledWasExternal` is `false`; or /// - `PublicProps` is empty. #[pallet::storage] - pub type NextExternal = StorageValue<_, (BoundedCallOf, VoteThreshold)>; + pub type NextExternal = StorageValue<_, (T::Hash, VoteThreshold)>; /// A record of who vetoed what. Maps proposal hash to a possible existent block number /// (until when it may not be resubmitted) and who vetoed it. #[pallet::storage] - pub type Blacklist = StorageMap< - _, - Identity, - H256, - (T::BlockNumber, BoundedVec), - >; + pub type Blacklist = + StorageMap<_, Identity, T::Hash, (T::BlockNumber, Vec)>; /// Record of all proposals that have been subject to emergency cancellation. #[pallet::storage] - pub type Cancellations = StorageMap<_, Identity, H256, bool, ValueQuery>; + pub type Cancellations = StorageMap<_, Identity, T::Hash, bool, ValueQuery>; + + /// Storage version of the pallet. + /// + /// New networks start with last version. 
+ #[pallet::storage] + pub(crate) type StorageVersion = StorageValue<_, Releases>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -443,6 +487,7 @@ pub mod pallet { PublicPropCount::::put(0 as PropIndex); ReferendumCount::::put(0 as ReferendumIndex); LowestUnbaked::::put(0 as ReferendumIndex); + StorageVersion::::put(Releases::V1); } } @@ -452,7 +497,7 @@ pub mod pallet { /// A motion has been proposed by a public account. Proposed { proposal_index: PropIndex, deposit: BalanceOf }, /// A public proposal has been tabled for referendum vote. - Tabled { proposal_index: PropIndex, deposit: BalanceOf }, + Tabled { proposal_index: PropIndex, deposit: BalanceOf, depositors: Vec }, /// An external proposal has been tabled. ExternalTabled, /// A referendum has begun. @@ -463,14 +508,31 @@ pub mod pallet { NotPassed { ref_index: ReferendumIndex }, /// A referendum has been cancelled. Cancelled { ref_index: ReferendumIndex }, + /// A proposal has been enacted. + Executed { ref_index: ReferendumIndex, result: DispatchResult }, /// An account has delegated their vote to another account. Delegated { who: T::AccountId, target: T::AccountId }, /// An account has cancelled a previous delegation operation. Undelegated { account: T::AccountId }, /// An external proposal has been vetoed. - Vetoed { who: T::AccountId, proposal_hash: H256, until: T::BlockNumber }, + Vetoed { who: T::AccountId, proposal_hash: T::Hash, until: T::BlockNumber }, + /// A proposal's preimage was noted, and the deposit taken. + PreimageNoted { proposal_hash: T::Hash, who: T::AccountId, deposit: BalanceOf }, + /// A proposal preimage was removed and used (the deposit was returned). + PreimageUsed { proposal_hash: T::Hash, provider: T::AccountId, deposit: BalanceOf }, + /// A proposal could not be executed because its preimage was invalid. + PreimageInvalid { proposal_hash: T::Hash, ref_index: ReferendumIndex }, + /// A proposal could not be executed because its preimage was missing. 
+ PreimageMissing { proposal_hash: T::Hash, ref_index: ReferendumIndex }, + /// A registered preimage was removed and the deposit collected by the reaper. + PreimageReaped { + proposal_hash: T::Hash, + provider: T::AccountId, + deposit: BalanceOf, + reaper: T::AccountId, + }, /// A proposal_hash has been blacklisted permanently. - Blacklisted { proposal_hash: H256 }, + Blacklisted { proposal_hash: T::Hash }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote> }, /// An account has secconded a proposal @@ -499,8 +561,20 @@ pub mod pallet { NoProposal, /// Identity may not veto a proposal twice AlreadyVetoed, + /// Preimage already noted + DuplicatePreimage, + /// Not imminent + NotImminent, + /// Too early + TooEarly, + /// Imminent + Imminent, + /// Preimage not found + PreimageMissing, /// Vote given for invalid referendum ReferendumInvalid, + /// Invalid preimage + PreimageInvalid, /// No proposals waiting NoneWaiting, /// The given account did not vote on the referendum. @@ -524,8 +598,8 @@ pub mod pallet { WrongUpperBound, /// Maximum number of votes reached. MaxVotesReached, - /// Maximum number of items reached. - TooMany, + /// Maximum number of proposals reached. + TooManyProposals, /// Voting period too low VotingPeriodLow, } @@ -549,10 +623,12 @@ pub mod pallet { /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). /// /// Emits `Proposed`. 
+ /// + /// Weight: `O(p)` #[pallet::weight(T::WeightInfo::propose())] pub fn propose( origin: OriginFor, - proposal: BoundedCallOf, + proposal_hash: T::Hash, #[pallet::compact] value: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -561,8 +637,7 @@ pub mod pallet { let index = Self::public_prop_count(); let real_prop_count = PublicProps::::decode_len().unwrap_or(0) as u32; let max_proposals = T::MaxProposals::get(); - ensure!(real_prop_count < max_proposals, Error::::TooMany); - let proposal_hash = proposal.hash(); + ensure!(real_prop_count < max_proposals, Error::::TooManyProposals); if let Some((until, _)) = >::get(proposal_hash) { ensure!( @@ -572,14 +647,10 @@ pub mod pallet { } T::Currency::reserve(&who, value)?; - - let depositors = BoundedVec::<_, T::MaxDeposits>::truncate_from(vec![who.clone()]); - DepositOf::::insert(index, (depositors, value)); - PublicPropCount::::put(index + 1); + >::insert(index, (&[&who][..], value)); - PublicProps::::try_append((index, proposal, who)) - .map_err(|_| Error::::TooMany)?; + >::append((index, proposal_hash, who)); Self::deposit_event(Event::::Proposed { proposal_index: index, deposit: value }); Ok(()) @@ -591,19 +662,23 @@ pub mod pallet { /// must have funds to cover the deposit, equal to the original deposit. /// /// - `proposal`: The index of the proposal to second. - #[pallet::weight(T::WeightInfo::second())] + /// - `seconds_upper_bound`: an upper bound on the current number of seconds on this + /// proposal. Extrinsic is weighted according to this value with no refund. + /// + /// Weight: `O(S)` where S is the number of seconds a proposal already has. 
+ #[pallet::weight(T::WeightInfo::second(*seconds_upper_bound))] pub fn second( origin: OriginFor, #[pallet::compact] proposal: PropIndex, + #[pallet::compact] seconds_upper_bound: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; let seconds = Self::len_of_deposit_of(proposal).ok_or(Error::::ProposalMissing)?; - ensure!(seconds < T::MaxDeposits::get(), Error::::TooMany); + ensure!(seconds <= seconds_upper_bound, Error::::WrongUpperBound); let mut deposit = Self::deposit_of(proposal).ok_or(Error::::ProposalMissing)?; T::Currency::reserve(&who, deposit.1)?; - let ok = deposit.0.try_push(who.clone()).is_ok(); - debug_assert!(ok, "`seconds` is below static limit; `try_insert` should succeed; qed"); + deposit.0.push(who.clone()); >::insert(proposal, deposit); Self::deposit_event(Event::::Seconded { seconder: who, prop_index: proposal }); Ok(()) @@ -616,7 +691,12 @@ pub mod pallet { /// /// - `ref_index`: The index of the referendum to vote for. /// - `vote`: The vote configuration. - #[pallet::weight(T::WeightInfo::vote_new().max(T::WeightInfo::vote_existing()))] + /// + /// Weight: `O(R)` where R is the number of referendums the voter has voted on. + #[pallet::weight( + T::WeightInfo::vote_new(T::MaxVotes::get()) + .max(T::WeightInfo::vote_existing(T::MaxVotes::get())) + )] pub fn vote( origin: OriginFor, #[pallet::compact] ref_index: ReferendumIndex, @@ -642,7 +722,7 @@ pub mod pallet { T::CancellationOrigin::ensure_origin(origin)?; let status = Self::referendum_status(ref_index)?; - let h = status.proposal.hash(); + let h = status.proposal_hash; ensure!(!>::contains_key(h), Error::::AlreadyCanceled); >::insert(h, true); @@ -656,20 +736,20 @@ pub mod pallet { /// The dispatch origin of this call must be `ExternalOrigin`. /// /// - `proposal_hash`: The preimage hash of the proposal. 
- #[pallet::weight(T::WeightInfo::external_propose())] - pub fn external_propose( - origin: OriginFor, - proposal: BoundedCallOf, - ) -> DispatchResult { + /// + /// Weight: `O(V)` with V number of vetoers in the blacklist of proposal. + /// Decoding vec of length V. Charged as maximum + #[pallet::weight(T::WeightInfo::external_propose(MAX_VETOERS))] + pub fn external_propose(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { T::ExternalOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::DuplicateProposal); - if let Some((until, _)) = >::get(proposal.hash()) { + if let Some((until, _)) = >::get(proposal_hash) { ensure!( >::block_number() >= until, Error::::ProposalBlacklisted, ); } - >::put((proposal, VoteThreshold::SuperMajorityApprove)); + >::put((proposal_hash, VoteThreshold::SuperMajorityApprove)); Ok(()) } @@ -687,10 +767,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::external_propose_majority())] pub fn external_propose_majority( origin: OriginFor, - proposal: BoundedCallOf, + proposal_hash: T::Hash, ) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; - >::put((proposal, VoteThreshold::SimpleMajority)); + >::put((proposal_hash, VoteThreshold::SimpleMajority)); Ok(()) } @@ -708,10 +788,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::external_propose_default())] pub fn external_propose_default( origin: OriginFor, - proposal: BoundedCallOf, + proposal_hash: T::Hash, ) -> DispatchResult { T::ExternalDefaultOrigin::ensure_origin(origin)?; - >::put((proposal, VoteThreshold::SuperMajorityAgainst)); + >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); Ok(()) } @@ -722,7 +802,7 @@ pub mod pallet { /// The dispatch of this call must be `FastTrackOrigin`. /// /// - `proposal_hash`: The hash of the current external proposal. - /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to + /// - `voting_period`: The period that is allowed for voting on this proposal. 
/// Must be always greater than zero. /// For `FastTrackOrigin` must be equal or greater than `FastTrackVotingPeriod`. /// - `delay`: The number of block after voting has ended in approval and this should be @@ -734,7 +814,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::fast_track())] pub fn fast_track( origin: OriginFor, - proposal_hash: H256, + proposal_hash: T::Hash, voting_period: T::BlockNumber, delay: T::BlockNumber, ) -> DispatchResult { @@ -753,21 +833,20 @@ pub mod pallet { T::InstantOrigin::ensure_origin(ensure_instant)?; ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); } - ensure!(voting_period > T::BlockNumber::zero(), Error::::VotingPeriodLow); - let (ext_proposal, threshold) = + let (e_proposal_hash, threshold) = >::get().ok_or(Error::::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::::NotSimpleMajority, ); - ensure!(proposal_hash == ext_proposal.hash(), Error::::InvalidHash); + ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); >::kill(); let now = >::block_number(); Self::inject_referendum( now.saturating_add(voting_period), - ext_proposal, + proposal_hash, threshold, delay, ); @@ -783,24 +862,22 @@ pub mod pallet { /// Emits `Vetoed`. 
/// /// Weight: `O(V + log(V))` where V is number of `existing vetoers` - #[pallet::weight(T::WeightInfo::veto_external())] - pub fn veto_external(origin: OriginFor, proposal_hash: H256) -> DispatchResult { + #[pallet::weight(T::WeightInfo::veto_external(MAX_VETOERS))] + pub fn veto_external(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; - if let Some((ext_proposal, _)) = NextExternal::::get() { - ensure!(proposal_hash == ext_proposal.hash(), Error::::ProposalMissing); + if let Some((e_proposal_hash, _)) = >::get() { + ensure!(proposal_hash == e_proposal_hash, Error::::ProposalMissing); } else { return Err(Error::::NoProposal.into()) } let mut existing_vetoers = - >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); + >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_else(Vec::new); let insert_position = existing_vetoers.binary_search(&who).err().ok_or(Error::::AlreadyVetoed)?; - existing_vetoers - .try_insert(insert_position, who.clone()) - .map_err(|_| Error::::TooMany)?; + existing_vetoers.insert(insert_position, who.clone()); let until = >::block_number().saturating_add(T::CooloffPeriod::get()); >::insert(&proposal_hash, (until, existing_vetoers)); @@ -827,6 +904,21 @@ pub mod pallet { Ok(()) } + /// Cancel a proposal queued for enactment. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// - `which`: The index of the referendum to cancel. + /// + /// Weight: `O(D)` where `D` is the items in the dispatch queue. Weighted as `D = 10`. + #[pallet::weight((T::WeightInfo::cancel_queued(10), DispatchClass::Operational))] + pub fn cancel_queued(origin: OriginFor, which: ReferendumIndex) -> DispatchResult { + ensure_root(origin)?; + T::Scheduler::cancel_named((DEMOCRACY_ID, which).encode()) + .map_err(|_| Error::::ProposalMissing)?; + Ok(()) + } + /// Delegate the voting power (with some given conviction) of the sending account. 
/// /// The balance delegated is locked for as long as it's delegated, and thereafter for the @@ -852,12 +944,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::delegate(T::MaxVotes::get()))] pub fn delegate( origin: OriginFor, - to: AccountIdLookupOf, + to: T::AccountId, conviction: Conviction, balance: BalanceOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let to = T::Lookup::lookup(to)?; let votes = Self::try_delegate(who, to, conviction, balance)?; Ok(Some(T::WeightInfo::delegate(votes)).into()) @@ -896,6 +987,135 @@ pub mod pallet { Ok(()) } + /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be + /// in the dispatch queue but does require a deposit, returned once enacted. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `encoded_proposal`: The preimage of a proposal. + /// + /// Emits `PreimageNoted`. + /// + /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). + #[pallet::weight(T::WeightInfo::note_preimage(encoded_proposal.len() as u32))] + pub fn note_preimage(origin: OriginFor, encoded_proposal: Vec) -> DispatchResult { + Self::note_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; + Ok(()) + } + + /// Same as `note_preimage` but origin is `OperationalPreimageOrigin`. + #[pallet::weight(( + T::WeightInfo::note_preimage(encoded_proposal.len() as u32), + DispatchClass::Operational, + ))] + pub fn note_preimage_operational( + origin: OriginFor, + encoded_proposal: Vec, + ) -> DispatchResult { + let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; + Self::note_preimage_inner(who, encoded_proposal)?; + Ok(()) + } + + /// Register the preimage for an upcoming proposal. This requires the proposal to be + /// in the dispatch queue. No deposit is needed. When this call is successful, i.e. + /// the preimage has not been uploaded before and matches some imminent proposal, + /// no fee is paid. 
+ /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `encoded_proposal`: The preimage of a proposal. + /// + /// Emits `PreimageNoted`. + /// + /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). + #[pallet::weight(T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32))] + pub fn note_imminent_preimage( + origin: OriginFor, + encoded_proposal: Vec, + ) -> DispatchResultWithPostInfo { + Self::note_imminent_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; + // We check that this preimage was not uploaded before in + // `note_imminent_preimage_inner`, thus this call can only be successful once. If + // successful, user does not pay a fee. + Ok(Pays::No.into()) + } + + /// Same as `note_imminent_preimage` but origin is `OperationalPreimageOrigin`. + #[pallet::weight(( + T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), + DispatchClass::Operational, + ))] + pub fn note_imminent_preimage_operational( + origin: OriginFor, + encoded_proposal: Vec, + ) -> DispatchResultWithPostInfo { + let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; + Self::note_imminent_preimage_inner(who, encoded_proposal)?; + // We check that this preimage was not uploaded before in + // `note_imminent_preimage_inner`, thus this call can only be successful once. If + // successful, user does not pay a fee. + Ok(Pays::No.into()) + } + + /// Remove an expired proposal preimage and collect the deposit. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proposal_hash`: The preimage hash of a proposal. + /// - `proposal_length_upper_bound`: an upper bound on length of the proposal. Extrinsic is + /// weighted according to this value with no refund. + /// + /// This will only work after `VotingPeriod` blocks from the time that the preimage was + /// noted, if it's the same account doing it. 
If it's a different account, then it'll only + /// work an additional `EnactmentPeriod` later. + /// + /// Emits `PreimageReaped`. + /// + /// Weight: `O(D)` where D is length of proposal. + #[pallet::weight(T::WeightInfo::reap_preimage(*proposal_len_upper_bound))] + pub fn reap_preimage( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] proposal_len_upper_bound: u32, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + ensure!( + Self::pre_image_data_len(proposal_hash)? <= proposal_len_upper_bound, + Error::::WrongUpperBound, + ); + + let (provider, deposit, since, expiry) = >::get(&proposal_hash) + .and_then(|m| match m { + PreimageStatus::Available { provider, deposit, since, expiry, .. } => + Some((provider, deposit, since, expiry)), + _ => None, + }) + .ok_or(Error::::PreimageMissing)?; + + let now = >::block_number(); + let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); + let additional = if who == provider { Zero::zero() } else { enactment }; + ensure!( + now >= since.saturating_add(voting).saturating_add(additional), + Error::::TooEarly + ); + ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); + + let res = + T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + debug_assert!(res.is_ok()); + >::remove(&proposal_hash); + Self::deposit_event(Event::::PreimageReaped { + proposal_hash, + provider, + deposit, + reaper: who, + }); + Ok(()) + } + /// Unlock tokens that have an expired lock. /// /// The dispatch origin of this call must be _Signed_. @@ -903,10 +1123,12 @@ pub mod pallet { /// - `target`: The account to remove the lock on. /// /// Weight: `O(R)` with R number of vote of target. 
- #[pallet::weight(T::WeightInfo::unlock_set(T::MaxVotes::get()).max(T::WeightInfo::unlock_remove(T::MaxVotes::get())))] - pub fn unlock(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight( + T::WeightInfo::unlock_set(T::MaxVotes::get()) + .max(T::WeightInfo::unlock_remove(T::MaxVotes::get())) + )] + pub fn unlock(origin: OriginFor, target: T::AccountId) -> DispatchResult { ensure_signed(origin)?; - let target = T::Lookup::lookup(target)?; Self::update_lock(&target); Ok(()) } @@ -962,16 +1184,26 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_other_vote(T::MaxVotes::get()))] pub fn remove_other_vote( origin: OriginFor, - target: AccountIdLookupOf, + target: T::AccountId, index: ReferendumIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; - let target = T::Lookup::lookup(target)?; let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; Self::try_remove_vote(&target, index, scope)?; Ok(()) } + /// Enact a proposal from a referendum. For now we just make the weight be the maximum. + #[pallet::weight(T::BlockWeights::get().max_block)] + pub fn enact_proposal( + origin: OriginFor, + proposal_hash: T::Hash, + index: ReferendumIndex, + ) -> DispatchResult { + ensure_root(origin)?; + Self::do_enact_proposal(proposal_hash, index) + } + /// Permanently place a proposal into the blacklist. This prevents it from ever being /// proposed again. /// @@ -987,21 +1219,21 @@ pub mod pallet { /// /// Weight: `O(p)` (though as this is an high-privilege dispatch, we assume it has a /// reasonable value). - #[pallet::weight((T::WeightInfo::blacklist(), DispatchClass::Operational))] + #[pallet::weight((T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational))] pub fn blacklist( origin: OriginFor, - proposal_hash: H256, + proposal_hash: T::Hash, maybe_ref_index: Option, ) -> DispatchResult { T::BlacklistOrigin::ensure_origin(origin)?; // Insert the proposal into the blacklist. 
- let permanent = (T::BlockNumber::max_value(), BoundedVec::::default()); + let permanent = (T::BlockNumber::max_value(), Vec::::new()); Blacklist::::insert(&proposal_hash, permanent); // Remove the queued proposal, if it's there. PublicProps::::mutate(|props| { - if let Some(index) = props.iter().position(|p| p.1.hash() == proposal_hash) { + if let Some(index) = props.iter().position(|p| p.1 == proposal_hash) { let (prop_index, ..) = props.remove(index); if let Some((whos, amount)) = DepositOf::::take(prop_index) { for who in whos.into_iter() { @@ -1012,14 +1244,14 @@ pub mod pallet { }); // Remove the external queued referendum, if it's there. - if matches!(NextExternal::::get(), Some((p, ..)) if p.hash() == proposal_hash) { + if matches!(NextExternal::::get(), Some((h, ..)) if h == proposal_hash) { NextExternal::::kill(); } // Remove the referendum, if it's there. if let Some(ref_index) = maybe_ref_index { if let Ok(status) = Self::referendum_status(ref_index) { - if status.proposal.hash() == proposal_hash { + if status.proposal_hash == proposal_hash { Self::internal_cancel_referendum(ref_index); } } @@ -1036,7 +1268,7 @@ pub mod pallet { /// - `prop_index`: The index of the proposal to cancel. /// /// Weight: `O(p)` where `p = PublicProps::::decode_len()` - #[pallet::weight(T::WeightInfo::cancel_proposal())] + #[pallet::weight(T::WeightInfo::cancel_proposal(T::MaxProposals::get()))] pub fn cancel_proposal( origin: OriginFor, #[pallet::compact] prop_index: PropIndex, @@ -1056,25 +1288,6 @@ pub mod pallet { } } -pub trait EncodeInto: Encode { - fn encode_into + Default>(&self) -> T { - let mut t = T::default(); - self.using_encoded(|data| { - if data.len() <= t.as_mut().len() { - t.as_mut()[0..data.len()].copy_from_slice(data); - } else { - // encoded self is too big to fit into a T. hash it and use the first bytes of that - // instead. 
- let hash = sp_io::hashing::blake2_256(data); - let l = t.as_mut().len().min(hash.len()); - t.as_mut()[0..l].copy_from_slice(&hash[0..l]); - } - }); - t - } -} -impl EncodeInto for T {} - impl Pallet { // exposed immutables. @@ -1087,7 +1300,7 @@ impl Pallet { /// Get all referenda ready for tally at block `n`. pub fn maturing_referenda_at( n: T::BlockNumber, - ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); Self::maturing_referenda_at_inner(n, next..last) @@ -1096,7 +1309,7 @@ impl Pallet { fn maturing_referenda_at_inner( n: T::BlockNumber, range: core::ops::Range, - ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { range .into_iter() .map(|i| (i, Self::referendum_info(i))) @@ -1112,13 +1325,13 @@ impl Pallet { /// Start a referendum. pub fn internal_start_referendum( - proposal: BoundedCallOf, + proposal_hash: T::Hash, threshold: VoteThreshold, delay: T::BlockNumber, ) -> ReferendumIndex { >::inject_referendum( >::block_number().saturating_add(T::VotingPeriod::get()), - proposal, + proposal_hash, threshold, delay, ) @@ -1134,8 +1347,8 @@ impl Pallet { /// Ok if the given referendum is active, Err otherwise fn ensure_ongoing( - r: ReferendumInfo, BalanceOf>, - ) -> Result, BalanceOf>, DispatchError> { + r: ReferendumInfo>, + ) -> Result>, DispatchError> { match r { ReferendumInfo::Ongoing(s) => Ok(s), _ => Err(Error::::ReferendumInvalid.into()), @@ -1144,7 +1357,7 @@ impl Pallet { fn referendum_status( ref_index: ReferendumIndex, - ) -> Result, BalanceOf>, DispatchError> { + ) -> Result>, DispatchError> { let info = ReferendumInfoOf::::get(ref_index).ok_or(Error::::ReferendumInvalid)?; Self::ensure_ongoing(info) } @@ -1169,9 +1382,11 @@ impl Pallet { votes[i].1 = vote; }, Err(i) => { - votes - .try_insert(i, (ref_index, vote)) - .map_err(|_| Error::::MaxVotesReached)?; + 
ensure!( + votes.len() as u32 <= T::MaxVotes::get(), + Error::::MaxVotesReached + ); + votes.insert(i, (ref_index, vote)); }, } Self::deposit_event(Event::::Voted { voter: who.clone(), ref_index, vote }); @@ -1385,14 +1600,14 @@ impl Pallet { /// Start a referendum fn inject_referendum( end: T::BlockNumber, - proposal: BoundedCallOf, + proposal_hash: T::Hash, threshold: VoteThreshold, delay: T::BlockNumber, ) -> ReferendumIndex { let ref_index = Self::referendum_count(); ReferendumCount::::put(ref_index + 1); let status = - ReferendumStatus { end, proposal, threshold, delay, tally: Default::default() }; + ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); >::insert(ref_index, item); Self::deposit_event(Event::::Started { ref_index, threshold }); @@ -1438,10 +1653,14 @@ impl Pallet { if let Some((depositors, deposit)) = >::take(prop_index) { // refund depositors - for d in depositors.iter() { + for d in &depositors { T::Currency::unreserve(d, deposit); } - Self::deposit_event(Event::::Tabled { proposal_index: prop_index, deposit }); + Self::deposit_event(Event::::Tabled { + proposal_index: prop_index, + deposit, + depositors, + }); Self::inject_referendum( now.saturating_add(T::VotingPeriod::get()), proposal, @@ -1455,35 +1674,71 @@ impl Pallet { } } + fn do_enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { + let preimage = >::take(&proposal_hash); + if let Some(PreimageStatus::Available { data, provider, deposit, .. 
}) = preimage { + if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { + let err_amount = T::Currency::unreserve(&provider, deposit); + debug_assert!(err_amount.is_zero()); + Self::deposit_event(Event::::PreimageUsed { proposal_hash, provider, deposit }); + + let res = proposal + .dispatch(frame_system::RawOrigin::Root.into()) + .map(|_| ()) + .map_err(|e| e.error); + Self::deposit_event(Event::::Executed { ref_index: index, result: res }); + + Ok(()) + } else { + T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); + Self::deposit_event(Event::::PreimageInvalid { + proposal_hash, + ref_index: index, + }); + Err(Error::::PreimageInvalid.into()) + } + } else { + Self::deposit_event(Event::::PreimageMissing { proposal_hash, ref_index: index }); + Err(Error::::PreimageMissing.into()) + } + } + fn bake_referendum( now: T::BlockNumber, index: ReferendumIndex, - status: ReferendumStatus, BalanceOf>, + status: ReferendumStatus>, ) -> bool { let total_issuance = T::Currency::total_issuance(); let approved = status.threshold.approved(status.tally, total_issuance); if approved { Self::deposit_event(Event::::Passed { ref_index: index }); - // Actually `hold` the proposal now since we didn't hold it when it came in via the - // submit extrinsic and we now know that it will be needed. This will be reversed by - // Scheduler pallet once it is executed which assumes that we will already have placed - // a `hold` on it. - T::Preimages::hold(&status.proposal); - - // Earliest it can be scheduled for is next block. 
- let when = now.saturating_add(status.delay.max(One::one())); - if T::Scheduler::schedule_named( - (DEMOCRACY_ID, index).encode_into(), - DispatchTime::At(when), - None, - 63, - frame_system::RawOrigin::Root.into(), - status.proposal, - ) - .is_err() - { - frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); + if status.delay.is_zero() { + let _ = Self::do_enact_proposal(status.proposal_hash, index); + } else { + let when = now.saturating_add(status.delay); + // Note that we need the preimage now. + Preimages::::mutate_exists( + &status.proposal_hash, + |maybe_pre| match *maybe_pre { + Some(PreimageStatus::Available { ref mut expiry, .. }) => + *expiry = Some(when), + ref mut a => *a = Some(PreimageStatus::Missing(when)), + }, + ); + + if T::Scheduler::schedule_named( + (DEMOCRACY_ID, index).encode(), + DispatchTime::At(when), + None, + 63, + frame_system::RawOrigin::Root.into(), + Call::enact_proposal { proposal_hash: status.proposal_hash, index }.into(), + ) + .is_err() + { + frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); + } } } else { Self::deposit_event(Event::::NotPassed { ref_index: index }); @@ -1506,7 +1761,7 @@ impl Pallet { /// # fn begin_block(now: T::BlockNumber) -> Weight { let max_block_weight = T::BlockWeights::get().max_block; - let mut weight = Weight::zero(); + let mut weight = 0; let next = Self::lowest_unbaked(); let last = Self::referendum_count(); @@ -1519,10 +1774,11 @@ impl Pallet { if Self::launch_next(now).is_ok() { weight = max_block_weight; } else { - weight.saturating_accrue(T::WeightInfo::on_initialize_base_with_launch_period(r)); + weight = + weight.saturating_add(T::WeightInfo::on_initialize_base_with_launch_period(r)); } } else { - weight.saturating_accrue(T::WeightInfo::on_initialize_base(r)); + weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); } // tally up votes for any expiring referenda. 
@@ -1533,8 +1789,8 @@ impl Pallet { } // Notes: - // * We don't consider the lowest unbaked to be the last maturing in case some referenda - // have a longer voting period than others. + // * We don't consider the lowest unbaked to be the last maturing in case some refendum have + // longer voting period than others. // * The iteration here shouldn't trigger any storage read that are not in cache, due to // `maturing_referenda_at_inner` having already read them. // * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the number @@ -1560,6 +1816,116 @@ impl Pallet { // `Compact`. decode_compact_u32_at(&>::hashed_key_for(proposal)) } + + /// Check that pre image exists and its value is variant `PreimageStatus::Missing`. + /// + /// This check is done without getting the complete value in the runtime to avoid copying a big + /// value in the runtime. + fn check_pre_image_is_missing(proposal_hash: T::Hash) -> DispatchResult { + // To decode the enum variant we only need the first byte. + let mut buf = [0u8; 1]; + let key = >::hashed_key_for(proposal_hash); + let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or(Error::::NotImminent)?; + // The value may be smaller that 1 byte. + let mut input = &buf[0..buf.len().min(bytes as usize)]; + + match input.read_byte() { + Ok(0) => Ok(()), // PreimageStatus::Missing is variant 0 + Ok(1) => Err(Error::::DuplicatePreimage.into()), + _ => { + sp_runtime::print("Failed to decode `PreimageStatus` variant"); + Err(Error::::NotImminent.into()) + }, + } + } + + /// Check that pre image exists, its value is variant `PreimageStatus::Available` and decode + /// the length of `data: Vec` fields. + /// + /// This check is done without getting the complete value in the runtime to avoid copying a big + /// value in the runtime. + /// + /// If the pre image is missing variant or doesn't exist then the error `PreimageMissing` is + /// returned. 
+ fn pre_image_data_len(proposal_hash: T::Hash) -> Result { + // To decode the `data` field of Available variant we need: + // * one byte for the variant + // * at most 5 bytes to decode a `Compact` + let mut buf = [0u8; 6]; + let key = >::hashed_key_for(proposal_hash); + let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or(Error::::PreimageMissing)?; + // The value may be smaller that 6 bytes. + let mut input = &buf[0..buf.len().min(bytes as usize)]; + + match input.read_byte() { + Ok(1) => (), // Check that input exists and is second variant. + Ok(0) => return Err(Error::::PreimageMissing.into()), + _ => { + sp_runtime::print("Failed to decode `PreimageStatus` variant"); + return Err(Error::::PreimageMissing.into()) + }, + } + + // Decode the length of the vector. + let len = codec::Compact::::decode(&mut input) + .map_err(|_| { + sp_runtime::print("Failed to decode `PreimageStatus` variant"); + DispatchError::from(Error::::PreimageMissing) + })? + .0; + + Ok(len) + } + + // See `note_preimage` + fn note_preimage_inner(who: T::AccountId, encoded_proposal: Vec) -> DispatchResult { + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + ensure!(!>::contains_key(&proposal_hash), Error::::DuplicatePreimage); + + let deposit = >::from(encoded_proposal.len() as u32) + .saturating_mul(T::PreimageByteDeposit::get()); + T::Currency::reserve(&who, deposit)?; + + let now = >::block_number(); + let a = PreimageStatus::Available { + data: encoded_proposal, + provider: who.clone(), + deposit, + since: now, + expiry: None, + }; + >::insert(proposal_hash, a); + + Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit }); + + Ok(()) + } + + // See `note_imminent_preimage` + fn note_imminent_preimage_inner( + who: T::AccountId, + encoded_proposal: Vec, + ) -> DispatchResult { + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + Self::check_pre_image_is_missing(proposal_hash)?; + let status = 
Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; + let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; + + let now = >::block_number(); + let free = >::zero(); + let a = PreimageStatus::Available { + data: encoded_proposal, + provider: who.clone(), + deposit: Zero::zero(), + since: now, + expiry: Some(expiry), + }; + >::insert(proposal_hash, a); + + Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit: free }); + + Ok(()) + } } /// Decode `Compact` from the trie at given key. diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations.rs deleted file mode 100644 index 3ec249c1d981c..0000000000000 --- a/frame/democracy/src/migrations.rs +++ /dev/null @@ -1,236 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Storage migrations for the preimage pallet. - -use super::*; -use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, BoundedVec}; -use sp_core::H256; - -/// The log target. -const TARGET: &'static str = "runtime::democracy::migration::v1"; - -/// The original data layout of the democracy pallet without a specific version number. 
-mod v0 { - use super::*; - - #[storage_alias] - pub type PublicProps = StorageValue< - Pallet, - Vec<(PropIndex, ::Hash, ::AccountId)>, - ValueQuery, - >; - - #[storage_alias] - pub type NextExternal = - StorageValue, (::Hash, VoteThreshold)>; - - #[cfg(feature = "try-runtime")] - #[storage_alias] - pub type ReferendumInfoOf = StorageMap< - Pallet, - frame_support::Twox64Concat, - ReferendumIndex, - ReferendumInfo< - ::BlockNumber, - ::Hash, - BalanceOf, - >, - >; -} - -pub mod v1 { - use super::*; - - /// Migration for translating bare `Hash`es into `Bounded`s. - pub struct Migration(sp_std::marker::PhantomData); - - impl> OnRuntimeUpgrade for Migration { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); - - let props_count = v0::PublicProps::::get().len(); - log::info!(target: TARGET, "{} public proposals will be migrated.", props_count,); - ensure!(props_count <= T::MaxProposals::get() as usize, "too many proposals"); - - let referenda_count = v0::ReferendumInfoOf::::iter().count(); - log::info!(target: TARGET, "{} referenda will be migrated.", referenda_count); - - Ok((props_count as u32, referenda_count as u32).encode()) - } - - #[allow(deprecated)] - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - if StorageVersion::get::>() != 0 { - log::warn!( - target: TARGET, - "skipping on_runtime_upgrade: executed on wrong storage version.\ - Expected version 0" - ); - return weight - } - - ReferendumInfoOf::::translate( - |index, old: ReferendumInfo>| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - log::info!(target: TARGET, "migrating referendum #{:?}", &index); - Some(match old { - ReferendumInfo::Ongoing(status) => - ReferendumInfo::Ongoing(ReferendumStatus { - end: status.end, - proposal: Bounded::from_legacy_hash(status.proposal), - threshold: status.threshold, - delay: status.delay, - tally: 
status.tally, - }), - ReferendumInfo::Finished { approved, end } => - ReferendumInfo::Finished { approved, end }, - }) - }, - ); - - let props = v0::PublicProps::::take() - .into_iter() - .map(|(i, hash, a)| (i, Bounded::from_legacy_hash(hash), a)) - .collect::>(); - let bounded = BoundedVec::<_, T::MaxProposals>::truncate_from(props.clone()); - PublicProps::::put(bounded); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - if props.len() as u32 > T::MaxProposals::get() { - log::error!( - target: TARGET, - "truncated {} public proposals to {}; continuing", - props.len(), - T::MaxProposals::get() - ); - } - - if let Some((hash, threshold)) = v0::NextExternal::::take() { - log::info!(target: TARGET, "migrating next external proposal"); - NextExternal::::put((Bounded::from_legacy_hash(hash), threshold)); - } - - StorageVersion::new(1).put::>(); - - weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); - - let (old_props_count, old_ref_count): (u32, u32) = - Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); - let new_props_count = crate::PublicProps::::get().len() as u32; - assert_eq!(new_props_count, old_props_count, "must migrate all public proposals"); - let new_ref_count = crate::ReferendumInfoOf::::iter().count() as u32; - assert_eq!(new_ref_count, old_ref_count, "must migrate all referenda"); - - log::info!( - target: TARGET, - "{} public proposals migrated, {} referenda migrated", - new_props_count, - new_ref_count, - ); - Ok(()) - } - } -} - -#[cfg(test)] -#[cfg(feature = "try-runtime")] -mod test { - use super::*; - use crate::{ - tests::{Test as T, *}, - types::*, - }; - use frame_support::bounded_vec; - - #[allow(deprecated)] - #[test] - fn migration_works() { - new_test_ext().execute_with(|| { - assert_eq!(StorageVersion::get::>(), 0); - // 
Insert some values into the v0 storage: - - // Case 1: Ongoing referendum - let hash = H256::repeat_byte(1); - let status = ReferendumStatus { - end: 1u32.into(), - proposal: hash.clone(), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 1u32.into(), - tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, - }; - v0::ReferendumInfoOf::::insert(1u32, ReferendumInfo::Ongoing(status)); - - // Case 2: Finished referendum - v0::ReferendumInfoOf::::insert( - 2u32, - ReferendumInfo::Finished { approved: true, end: 123u32.into() }, - ); - - // Case 3: Public proposals - let hash2 = H256::repeat_byte(2); - v0::PublicProps::::put(vec![ - (3u32, hash.clone(), 123u64), - (4u32, hash2.clone(), 123u64), - ]); - - // Case 4: Next external - v0::NextExternal::::put((hash.clone(), VoteThreshold::SuperMajorityApprove)); - - // Migrate. - let state = v1::Migration::::pre_upgrade().unwrap(); - let _weight = v1::Migration::::on_runtime_upgrade(); - v1::Migration::::post_upgrade(state).unwrap(); - // Check that all values got migrated. 
- - // Case 1: Ongoing referendum - assert_eq!( - ReferendumInfoOf::::get(1u32), - Some(ReferendumInfo::Ongoing(ReferendumStatus { - end: 1u32.into(), - proposal: Bounded::from_legacy_hash(hash), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 1u32.into(), - tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, - })) - ); - // Case 2: Finished referendum - assert_eq!( - ReferendumInfoOf::::get(2u32), - Some(ReferendumInfo::Finished { approved: true, end: 123u32.into() }) - ); - // Case 3: Public proposals - let props: BoundedVec<_, ::MaxProposals> = bounded_vec![ - (3u32, Bounded::from_legacy_hash(hash), 123u64), - (4u32, Bounded::from_legacy_hash(hash2), 123u64) - ]; - assert_eq!(PublicProps::::get(), props); - // Case 4: Next external - assert_eq!( - NextExternal::::get(), - Some((Bounded::from_legacy_hash(hash), VoteThreshold::SuperMajorityApprove)) - ); - }); - } -} diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index eceb1a3400bba..0fe83a07610d1 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -19,11 +19,11 @@ use super::*; use crate as pallet_democracy; +use codec::Encode; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, - SortedMembers, StorePreimage, + ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, SortedMembers, }, weights::Weight, }; @@ -35,12 +35,14 @@ use sp_runtime::{ traits::{BadOrigin, BlakeTwo256, IdentityLookup}, Perbill, }; + mod cancellation; mod decoders; mod delegation; mod external_proposing; mod fast_tracking; mod lock_voting; +mod preimage; mod public_proposals; mod scheduling; mod voting; @@ -61,7 +63,6 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Preimage: pallet_preimage, 
Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, } @@ -69,31 +70,31 @@ frame_support::construct_runtime!( // Test that a fitlered call can be dispatched. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &RuntimeCall) -> bool { - !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) +impl Contains for BaseFilter { + fn contains(call: &Call) -> bool { + !matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. })) } } parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::constants::WEIGHT_PER_SECOND.set_proof_size(u64::MAX)); + frame_system::limits::BlockWeights::simple_max(1_000_000); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; - type BlockWeights = BlockWeights; + type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -108,27 +109,18 @@ impl frame_system::Config for Test { parameter_types! 
{ pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } - -impl pallet_preimage::Config for Test { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - type Currency = Balances; - type ManagerOrigin = EnsureRoot; - type BaseDeposit = ConstU64<0>; - type ByteDeposit = ConstU64<0>; -} - impl pallet_scheduler::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; + type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = ConstU32<100>; + type MaxScheduledPerBlock = (); type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type Preimages = (); + type PreimageProvider = (); + type NoPreimagePostponement = (); } impl pallet_balances::Config for Test { @@ -136,7 +128,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type MaxLocks = ConstU32<10>; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -164,7 +156,8 @@ impl SortedMembers for OneToFive { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Proposal = Call; + type Event = Event; type Currency = pallet_balances::Pallet; type EnactmentPeriod = ConstU64<2>; type LaunchPeriod = ConstU64<2>; @@ -172,8 +165,6 @@ impl Config for Test { type VoteLockingPeriod = ConstU64<3>; type FastTrackVotingPeriod = ConstU64<2>; type MinimumDeposit = ConstU64<1>; - type MaxDeposits = ConstU32<1000>; - type MaxBlacklisted = ConstU32<5>; type ExternalOrigin = EnsureSignedBy; type ExternalMajorityOrigin = EnsureSignedBy; type ExternalDefaultOrigin = EnsureSignedBy; @@ -183,15 +174,16 @@ impl Config for Test { type CancelProposalOrigin = EnsureRoot; type VetoOrigin = EnsureSignedBy; type 
CooloffPeriod = ConstU64<2>; + type PreimageByteDeposit = PreimageByteDeposit; type Slash = (); type InstantOrigin = EnsureSignedBy; type InstantAllowed = InstantAllowed; type Scheduler = Scheduler; type MaxVotes = ConstU32<100>; + type OperationalPreimageOrigin = EnsureSignedBy; type PalletsOrigin = OriginCaller; type WeightInfo = (); type MaxProposals = ConstU32<100>; - type Preimages = Preimage; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -209,6 +201,12 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } +/// Execute the function two times, with `true` and with `false`. +pub fn new_test_ext_execute_with_cond(execute: impl FnOnce(bool) -> () + Clone) { + new_test_ext().execute_with(|| (execute.clone())(false)); + new_test_ext().execute_with(|| execute(true)); +} + #[test] fn params_should_work() { new_test_ext().execute_with(|| { @@ -218,22 +216,40 @@ fn params_should_work() { }); } -fn set_balance_proposal(value: u64) -> BoundedCallOf { - let inner = pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }; - let outer = RuntimeCall::Balances(inner); - Preimage::bound(outer).unwrap() +fn set_balance_proposal(value: u64) -> Vec { + Call::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }) + .encode() } #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { - let call = Preimage::realize(&set_balance_proposal(i)).unwrap().0; + let call = Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); assert!(!::BaseCallFilter::contains(&call)); } } +fn set_balance_proposal_hash(value: u64) -> H256 { + BlakeTwo256::hash(&set_balance_proposal(value)[..]) +} + +fn set_balance_proposal_hash_and_note(value: u64) -> H256 { + let p = set_balance_proposal(value); + let h = BlakeTwo256::hash(&p[..]); + match Democracy::note_preimage(Origin::signed(6), p) { + Ok(_) => (), + Err(x) if x == Error::::DuplicatePreimage.into() => (), + Err(x) => panic!("{:?}", x), + } + h +} + 
fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal(value), delay) + Democracy::propose(Origin::signed(who), set_balance_proposal_hash(value), delay) +} + +fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { + Democracy::propose(Origin::signed(who), set_balance_proposal_hash_and_note(value), delay) } fn next_block() { @@ -250,7 +266,7 @@ fn fast_forward_to(n: u64) { fn begin_referendum() -> ReferendumIndex { System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 1)); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); fast_forward_to(2); 0 } diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index ff046d612c026..9035e17c5c80b 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -24,12 +24,12 @@ fn cancel_referendum_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r.into())); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::cancel_referendum(Origin::root(), r.into())); assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); @@ -42,33 +42,54 @@ fn cancel_referendum_should_work() { }); } +#[test] +fn cancel_queued_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + // start of 2 => next referendum scheduled. 
+ fast_forward_to(2); + + assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + + fast_forward_to(4); + + assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); + + assert_noop!(Democracy::cancel_queued(Origin::root(), 1), Error::::ProposalMissing); + assert_ok!(Democracy::cancel_queued(Origin::root(), 0)); + assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); + }); +} + #[test] fn emergency_cancel_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 2, ); assert!(Democracy::referendum_status(r).is_ok()); - assert_noop!(Democracy::emergency_cancel(RuntimeOrigin::signed(3), r), BadOrigin); - assert_ok!(Democracy::emergency_cancel(RuntimeOrigin::signed(4), r)); + assert_noop!(Democracy::emergency_cancel(Origin::signed(3), r), BadOrigin); + assert_ok!(Democracy::emergency_cancel(Origin::signed(4), r)); assert!(Democracy::referendum_info(r).is_none()); // some time later... let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 2, ); assert!(Democracy::referendum_status(r).is_ok()); assert_noop!( - Democracy::emergency_cancel(RuntimeOrigin::signed(4), r), + Democracy::emergency_cancel(Origin::signed(4), r), Error::::AlreadyCanceled, ); }); diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 1c8b9c3d980f9..1fbb88060549b 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -18,10 +18,7 @@ //! 
The for various partial storage decoders use super::*; -use frame_support::{ - storage::{migration, unhashed}, - BoundedVec, -}; +use frame_support::storage::{migration, unhashed}; #[test] fn test_decode_compact_u32_at() { @@ -45,8 +42,7 @@ fn test_decode_compact_u32_at() { fn len_of_deposit_of() { new_test_ext().execute_with(|| { for l in vec![0, 1, 200, 1000] { - let value: (BoundedVec, u64) = - ((0..l).map(|_| Default::default()).collect::>().try_into().unwrap(), 3u64); + let value: (Vec, u64) = ((0..l).map(|_| Default::default()).collect(), 3u64); DepositOf::::insert(2, value); assert_eq!(Democracy::len_of_deposit_of(2), Some(l)); } @@ -55,3 +51,35 @@ fn len_of_deposit_of() { assert_eq!(Democracy::len_of_deposit_of(2), None); }) } + +#[test] +fn pre_image() { + new_test_ext().execute_with(|| { + let key = Default::default(); + let missing = PreimageStatus::Missing(0); + Preimages::::insert(key, missing); + assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); + assert_eq!(Democracy::check_pre_image_is_missing(key), Ok(())); + + Preimages::::remove(key); + assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); + assert_noop!(Democracy::check_pre_image_is_missing(key), Error::::NotImminent); + + for l in vec![0, 10, 100, 1000u32] { + let available = PreimageStatus::Available { + data: (0..l).map(|i| i as u8).collect(), + provider: 0, + deposit: 0, + since: 0, + expiry: None, + }; + + Preimages::::insert(key, available); + assert_eq!(Democracy::pre_image_data_len(key), Ok(l)); + assert_noop!( + Democracy::check_pre_image_is_missing(key), + Error::::DuplicatePreimage + ); + } + }) +} diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs index bca7cb9524112..3551ca8f91123 100644 --- a/frame/democracy/src/tests/delegation.rs +++ b/frame/democracy/src/tests/delegation.rs @@ -24,38 +24,38 @@ fn single_proposal_should_work_with_delegation() { new_test_ext().execute_with(|| { 
System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 1)); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); fast_forward_to(2); // Delegate first vote. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); let r = 0; - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); // Delegate a second vote. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(3), 1, Conviction::None, 30)); + assert_ok!(Democracy::delegate(Origin::signed(3), 1, Conviction::None, 30)); assert_eq!(tally(r), Tally { ayes: 6, nays: 0, turnout: 60 }); // Reduce first vote. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 10)); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 10)); assert_eq!(tally(r), Tally { ayes: 5, nays: 0, turnout: 50 }); // Second vote delegates to first; we don't do tiered delegation, so it doesn't get used. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(3), 2, Conviction::None, 30)); + assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); assert_eq!(tally(r), Tally { ayes: 2, nays: 0, turnout: 20 }); // Main voter cancels their vote - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(1), r)); + assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); // First delegator delegates half funds with conviction; nothing changes yet. 
- assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked1x, 10)); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked1x, 10)); assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); // Main voter reinstates their vote - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 11, nays: 0, turnout: 20 }); }); } @@ -64,7 +64,7 @@ fn single_proposal_should_work_with_delegation() { fn self_delegation_not_allowed() { new_test_ext().execute_with(|| { assert_noop!( - Democracy::delegate(RuntimeOrigin::signed(1), 1, Conviction::None, 10), + Democracy::delegate(Origin::signed(1), 1, Conviction::None, 10), Error::::Nonsense, ); }); @@ -75,19 +75,19 @@ fn cyclic_delegation_should_unwind() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 1)); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); fast_forward_to(2); // Check behavior with cycle. 
- assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(3), 2, Conviction::None, 30)); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(1), 3, Conviction::None, 10)); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); + assert_ok!(Democracy::delegate(Origin::signed(1), 3, Conviction::None, 10)); let r = 0; - assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(3))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3))); - assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(1))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(1))); + assert_ok!(Democracy::undelegate(Origin::signed(3))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); + assert_ok!(Democracy::undelegate(Origin::signed(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); // Delegated vote is counted. assert_eq!(tally(r), Tally { ayes: 3, nays: 3, turnout: 60 }); @@ -100,18 +100,18 @@ fn single_proposal_should_work_with_vote_and_delegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 1)); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); fast_forward_to(2); let r = 0; - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, nay(2))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, nay(2))); assert_eq!(tally(r), Tally { ayes: 1, nays: 2, turnout: 30 }); // Delegate vote. 
- assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(2), r)); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); // Delegated vote replaces the explicit vote. assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); }); @@ -122,15 +122,15 @@ fn single_proposal_should_work_with_undelegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 1)); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); // Delegate and undelegate vote. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); - assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(2))); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::undelegate(Origin::signed(2))); fast_forward_to(2); let r = 0; - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); // Delegated vote is not counted. assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); @@ -143,11 +143,11 @@ fn single_proposal_should_work_with_delegation_and_vote() { new_test_ext().execute_with(|| { let r = begin_referendum(); // Delegate, undelegate and vote. 
- assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(2))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(2))); + assert_ok!(Democracy::undelegate(Origin::signed(2))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); // Delegated vote is not counted. assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); }); @@ -159,8 +159,8 @@ fn conviction_should_be_honored_in_delegation() { new_test_ext().execute_with(|| { let r = begin_referendum(); // Delegate and vote. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); // Delegated vote is huge. assert_eq!(tally(r), Tally { ayes: 121, nays: 0, turnout: 30 }); }); @@ -171,12 +171,8 @@ fn split_vote_delegation_should_be_ignored() { // If transactor voted, delegated vote is overwritten. new_test_ext().execute_with(|| { let r = begin_referendum(); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote( - RuntimeOrigin::signed(1), - r, - AccountVote::Split { aye: 10, nay: 0 } - )); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(Origin::signed(1), r, AccountVote::Split { aye: 10, nay: 0 })); // Delegated vote is huge. 
assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); }); @@ -188,8 +184,8 @@ fn redelegation_keeps_lock() { new_test_ext().execute_with(|| { let r = begin_referendum(); // Delegate and vote. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); // Delegated vote is huge. assert_eq!(tally(r), Tally { ayes: 121, nays: 0, turnout: 30 }); @@ -200,14 +196,14 @@ fn redelegation_keeps_lock() { assert_eq!(VotingOf::::get(2).prior(), &prior_lock); // Delegate someone else at a lower conviction and amount - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 3, Conviction::None, 10)); + assert_ok!(Democracy::delegate(Origin::signed(2), 3, Conviction::None, 10)); // 6x prior should appear w/ locked balance. prior_lock.accumulate(98, 20); assert_eq!(VotingOf::::get(2).prior(), &prior_lock); assert_eq!(VotingOf::::get(2).locked_balance(), 20); // Unlock shouldn't work - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 2)); + assert_ok!(Democracy::unlock(Origin::signed(2), 2)); assert_eq!(VotingOf::::get(2).prior(), &prior_lock); assert_eq!(VotingOf::::get(2).locked_balance(), 20); @@ -215,7 +211,7 @@ fn redelegation_keeps_lock() { // Now unlock can remove the prior lock and reduce the locked amount. 
assert_eq!(VotingOf::::get(2).prior(), &prior_lock); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 2)); + assert_ok!(Democracy::unlock(Origin::signed(2), 2)); assert_eq!(VotingOf::::get(2).prior(), &vote::PriorLock::default()); assert_eq!(VotingOf::::get(2).locked_balance(), 10); }); diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index 4cfdd2aa74a3d..5d4a9f2a7cbfc 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -23,50 +23,59 @@ use super::*; fn veto_external_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); assert!(>::exists()); - let h = set_balance_proposal(2).hash(); - assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); + let h = set_balance_proposal_hash_and_note(2); + assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone())); // cancelled. assert!(!>::exists()); // fails - same proposal can't be resubmitted. assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), Error::::ProposalBlacklisted ); fast_forward_to(1); // fails as we're still in cooloff period. assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), Error::::ProposalBlacklisted ); fast_forward_to(2); // works; as we're out of the cooloff period. 
- assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); assert!(>::exists()); // 3 can't veto the same thing twice. assert_noop!( - Democracy::veto_external(RuntimeOrigin::signed(3), h), + Democracy::veto_external(Origin::signed(3), h.clone()), Error::::AlreadyVetoed ); // 4 vetoes. - assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(4), h)); + assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone())); // cancelled again. assert!(!>::exists()); fast_forward_to(3); // same proposal fails as we're still in cooloff assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2)), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), Error::::ProposalBlacklisted ); // different proposal works fine. - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(3),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); }); } @@ -75,16 +84,19 @@ fn external_blacklisting_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); - let hash = set_balance_proposal(2).hash(); - assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); + let hash = set_balance_proposal_hash(2); + assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); fast_forward_to(2); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2)), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash_and_note(2),), Error::::ProposalBlacklisted, 
); }); @@ -95,12 +107,15 @@ fn external_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(1), set_balance_proposal(2),), + Democracy::external_propose(Origin::signed(1), set_balance_proposal_hash(2),), BadOrigin, ); - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(1),), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(1),), Error::::DuplicateProposal ); fast_forward_to(2); @@ -108,7 +123,7 @@ fn external_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -122,19 +137,19 @@ fn external_majority_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_majority(RuntimeOrigin::signed(1), set_balance_proposal(2)), + Democracy::external_propose_majority(Origin::signed(1), set_balance_proposal_hash(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_majority( - RuntimeOrigin::signed(3), - set_balance_proposal(2) + Origin::signed(3), + set_balance_proposal_hash_and_note(2) )); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash(2), threshold: VoteThreshold::SimpleMajority, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -148,19 +163,19 @@ fn external_default_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - 
Democracy::external_propose_default(RuntimeOrigin::signed(3), set_balance_proposal(2)), + Democracy::external_propose_default(Origin::signed(3), set_balance_proposal_hash(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_default( - RuntimeOrigin::signed(1), - set_balance_proposal(2) + Origin::signed(1), + set_balance_proposal_hash_and_note(2) )); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash(2), threshold: VoteThreshold::SuperMajorityAgainst, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -173,8 +188,11 @@ fn external_default_referendum_works() { fn external_and_public_interleaving_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(1),)); - assert_ok!(propose_set_balance(6, 2, 2)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(1), + )); + assert_ok!(propose_set_balance_and_note(6, 2, 2)); fast_forward_to(2); @@ -183,14 +201,17 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal: set_balance_proposal(1), + proposal_hash: set_balance_proposal_hash_and_note(1), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish external - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(3),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); fast_forward_to(4); @@ -199,7 +220,7 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(1), Ok(ReferendumStatus { end: 6, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash_and_note(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, 
tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -214,14 +235,17 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(2), Ok(ReferendumStatus { end: 8, - proposal: set_balance_proposal(3), + proposal_hash: set_balance_proposal_hash_and_note(3), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish external - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(5),)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(5), + )); fast_forward_to(8); @@ -230,15 +254,18 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(3), Ok(ReferendumStatus { end: 10, - proposal: set_balance_proposal(5), + proposal_hash: set_balance_proposal_hash_and_note(5), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish both - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(7),)); - assert_ok!(propose_set_balance(6, 4, 2)); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(7), + )); + assert_ok!(propose_set_balance_and_note(6, 4, 2)); fast_forward_to(10); @@ -247,17 +274,17 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(4), Ok(ReferendumStatus { end: 12, - proposal: set_balance_proposal(4), + proposal_hash: set_balance_proposal_hash_and_note(4), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish public again - assert_ok!(propose_set_balance(6, 6, 2)); + assert_ok!(propose_set_balance_and_note(6, 6, 2)); // cancel external - let h = set_balance_proposal(7).hash(); - assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); + let h = set_balance_proposal_hash_and_note(7); + 
assert_ok!(Democracy::veto_external(Origin::signed(3), h)); fast_forward_to(12); @@ -266,7 +293,7 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(5), Ok(ReferendumStatus { end: 14, - proposal: set_balance_proposal(6), + proposal_hash: set_balance_proposal_hash_and_note(6), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 97bb7a63908ab..caf83c6d46120 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -23,22 +23,22 @@ use super::*; fn fast_track_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal(2).hash(); + let h = set_balance_proposal_hash_and_note(2); assert_noop!( - Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), + Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( - RuntimeOrigin::signed(3), - set_balance_proposal(2) + Origin::signed(3), + set_balance_proposal_hash_and_note(2) )); - assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); - assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 2, 0)); + assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); + assert_ok!(Democracy::fast_track(Origin::signed(5), h, 2, 0)); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 2, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash_and_note(2), threshold: VoteThreshold::SimpleMajority, delay: 0, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -51,32 +51,32 @@ fn fast_track_referendum_works() { fn instant_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal(2).hash(); + let h = 
set_balance_proposal_hash_and_note(2); assert_noop!( - Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), + Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( - RuntimeOrigin::signed(3), - set_balance_proposal(2) + Origin::signed(3), + set_balance_proposal_hash_and_note(2) )); - assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); - assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 1, 0), BadOrigin); + assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); + assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); assert_noop!( - Democracy::fast_track(RuntimeOrigin::signed(6), h, 1, 0), + Democracy::fast_track(Origin::signed(6), h, 1, 0), Error::::InstantNotAllowed ); INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); assert_noop!( - Democracy::fast_track(RuntimeOrigin::signed(6), h, 0, 0), + Democracy::fast_track(Origin::signed(6), h, 0, 0), Error::::VotingPeriodLow ); - assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(6), h, 1, 0)); + assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 1, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash_and_note(2), threshold: VoteThreshold::SimpleMajority, delay: 0, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -93,7 +93,7 @@ fn instant_next_block_referendum_backed() { let majority_origin_id = 3; let instant_origin_id = 6; let voting_period = 1; - let proposal = set_balance_proposal(2); + let proposal_hash = set_balance_proposal_hash_and_note(2); let delay = 2; // has no effect on test // init @@ -102,14 +102,14 @@ fn instant_next_block_referendum_backed() { // propose with majority origin assert_ok!(Democracy::external_propose_majority( - RuntimeOrigin::signed(majority_origin_id), - proposal.clone() + Origin::signed(majority_origin_id), + 
proposal_hash )); // fast track with instant origin and voting period pointing to the next block assert_ok!(Democracy::fast_track( - RuntimeOrigin::signed(instant_origin_id), - proposal.hash(), + Origin::signed(instant_origin_id), + proposal_hash, voting_period, delay )); @@ -119,7 +119,7 @@ fn instant_next_block_referendum_backed() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: start_block_number + voting_period, - proposal, + proposal_hash, threshold: VoteThreshold::SimpleMajority, delay, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -143,10 +143,13 @@ fn instant_next_block_referendum_backed() { fn fast_track_referendum_fails_when_no_simple_majority() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal(2).hash(); - assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); + let h = set_balance_proposal_hash_and_note(2); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2) + )); assert_noop!( - Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), + Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::NotSimpleMajority ); }); diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index 540198ecf33a1..0718734367314 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -43,15 +43,15 @@ fn lock_voting_should_work() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(5, 10))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(4, 20))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3, 30))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, aye(2, 40))); - 
assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, nay(1, 50))); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::vote(Origin::signed(4), r, aye(2, 40))); + assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); // All balances are currently locked. @@ -59,23 +59,23 @@ fn lock_voting_should_work() { assert_eq!(Balances::locks(i), vec![the_lock(i * 10)]); } - fast_forward_to(3); + fast_forward_to(2); // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(1), r)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 1)); + assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 1)); // Anyone can reap and unlock anyone else's in this context. - assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(2), 5, r)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 5)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 5, r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 5)); // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. assert_noop!( - Democracy::remove_other_vote(RuntimeOrigin::signed(1), 2, r), + Democracy::remove_other_vote(Origin::signed(1), 2, r), Error::::NoPermission ); // However, they can be unvoted by the owner, though it will make no difference to the lock. 
- assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(2), r)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 2)); + assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 2)); assert_eq!(Balances::locks(1), vec![]); assert_eq!(Balances::locks(2), vec![the_lock(20)]); @@ -87,35 +87,35 @@ fn lock_voting_should_work() { fast_forward_to(7); // No change yet... assert_noop!( - Democracy::remove_other_vote(RuntimeOrigin::signed(1), 4, r), + Democracy::remove_other_vote(Origin::signed(1), 4, r), Error::::NoPermission ); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 4)); + assert_ok!(Democracy::unlock(Origin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![the_lock(40)]); fast_forward_to(8); // 4 should now be able to reap and unlock - assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 4, r)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 4)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 4, r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![]); fast_forward_to(13); assert_noop!( - Democracy::remove_other_vote(RuntimeOrigin::signed(1), 3, r), + Democracy::remove_other_vote(Origin::signed(1), 3, r), Error::::NoPermission ); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 3)); + assert_ok!(Democracy::unlock(Origin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![the_lock(30)]); fast_forward_to(14); - assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 3, r)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 3)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 3, r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![]); // 2 doesn't need to reap_vote here because it was already done before. 
fast_forward_to(25); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 2)); + assert_ok!(Democracy::unlock(Origin::signed(1), 2)); assert_eq!(Balances::locks(2), vec![the_lock(20)]); fast_forward_to(26); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 2)); + assert_ok!(Democracy::unlock(Origin::signed(1), 2)); assert_eq!(Balances::locks(2), vec![]); }); } @@ -126,17 +126,17 @@ fn no_locks_without_conviction_should_work() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(0, 10))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(0, 10))); - fast_forward_to(3); + fast_forward_to(2); assert_eq!(Balances::free_balance(42), 2); - assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(2), 1, r)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 1)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 1, r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 1)); assert_eq!(Balances::locks(1), vec![]); }); } @@ -146,15 +146,15 @@ fn lock_voting_should_work_with_delegation() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(5, 10))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(4, 20))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3, 30))); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(4), 2, Conviction::Locked2x, 40)); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, nay(1, 50))); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); + 
assert_ok!(Democracy::delegate(Origin::signed(4), 2, Conviction::Locked2x, 40)); + assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); @@ -167,17 +167,29 @@ fn lock_voting_should_work_with_delegation() { fn setup_three_referenda() -> (u32, u32, u32) { System::set_block_number(0); - let r1 = - Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r1, aye(4, 10))); - - let r2 = - Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r2, aye(3, 20))); - - let r3 = - Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r3, aye(2, 50))); + let r1 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r1, aye(4, 10))); + + let r2 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); + + let r3 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); fast_forward_to(2); @@ -194,36 +206,36 @@ fn prior_lockvotes_should_be_enforced() { fast_forward_to(7); assert_noop!( - Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.2), + Democracy::remove_other_vote(Origin::signed(1), 5, r.2), Error::::NoPermission ); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); fast_forward_to(8); - 
assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.2)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(13); assert_noop!( - Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.1), + Democracy::remove_other_vote(Origin::signed(1), 5, r.1), Error::::NoPermission ); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(14); - assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.1)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(25); assert_noop!( - Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.0), + Democracy::remove_other_vote(Origin::signed(1), 5, r.0), Error::::NoPermission ); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(26); - assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.0)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -237,27 +249,27 @@ fn single_consolidation_of_lockvotes_should_work_as_before() { // r.2 locked 50 until 2 + 2 * 3 = #8 fast_forward_to(7); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.2)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + 
assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); fast_forward_to(8); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(13); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.1)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(14); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(25); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.0)); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(26); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -270,20 +282,20 @@ fn multi_consolidation_of_lockvotes_should_be_conservative() { // r.1 locked 20 until 2 + 4 * 3 = #14 // r.2 locked 50 until 2 + 2 * 3 = #8 - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.2)); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.1)); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.0)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); fast_forward_to(8); - 
assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 20); fast_forward_to(14); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(26); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -294,36 +306,36 @@ fn locks_should_persist_from_voting_to_delegation() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, aye(4, 10))); + assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); fast_forward_to(2); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); // locked 10 until #26. - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(5), 1, Conviction::Locked3x, 20)); + assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked3x, 20)); // locked 20. 
assert!(Balances::locks(5)[0].amount == 20); - assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(5))); + assert_ok!(Democracy::undelegate(Origin::signed(5))); // locked 20 until #14 fast_forward_to(13); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount == 20); fast_forward_to(14); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(25); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(26); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -332,8 +344,8 @@ fn locks_should_persist_from_voting_to_delegation() { fn locks_should_persist_from_delegation_to_voting() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::delegate(RuntimeOrigin::signed(5), 1, Conviction::Locked5x, 5)); - assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(5))); + assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked5x, 5)); + assert_ok!(Democracy::undelegate(Origin::signed(5))); // locked 5 until 16 * 3 = #48 let r = setup_three_referenda(); @@ -341,24 +353,24 @@ fn locks_should_persist_from_delegation_to_voting() { // r.1 locked 20 until 2 + 4 * 3 = #14 // r.2 locked 50 until 2 + 2 * 3 = #8 - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.2)); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.1)); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.0)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + 
assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); fast_forward_to(8); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 20); fast_forward_to(14); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(26); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 5); fast_forward_to(48); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs new file mode 100644 index 0000000000000..21303c8eddae3 --- /dev/null +++ b/frame/democracy/src/tests/preimage.rs @@ -0,0 +1,219 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The preimage tests. 
+ +use super::*; + +#[test] +fn missing_preimage_should_fail() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); +} + +#[test] +fn preimage_deposit_should_be_required_and_returned() { + new_test_ext_execute_with_cond(|operational| { + // fee of 100 is too much. + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); + assert_noop!( + if operational { + Democracy::note_preimage_operational(Origin::signed(6), vec![0; 500]) + } else { + Democracy::note_preimage(Origin::signed(6), vec![0; 500]) + }, + BalancesError::::InsufficientBalance, + ); + // fee of 1 is reasonable. + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + next_block(); + + assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(6), 60); + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn preimage_deposit_should_be_reapable_earlier_by_owner() { + new_test_ext_execute_with_cond(|operational| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + assert_ok!(if operational { + Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + } else { + Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + }); + + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::MAX), + Error::::TooEarly + ); + next_block(); + assert_ok!(Democracy::reap_preimage( + Origin::signed(6), + 
set_balance_proposal_hash(2), + u32::MAX + )); + + assert_eq!(Balances::free_balance(6), 60); + assert_eq!(Balances::reserved_balance(6), 0); + }); +} + +#[test] +fn preimage_deposit_should_be_reapable() { + new_test_ext_execute_with_cond(|operational| { + assert_noop!( + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Error::::PreimageMissing + ); + + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + assert_ok!(if operational { + Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + } else { + Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + }); + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + next_block(); + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Error::::TooEarly + ); + + next_block(); + assert_ok!(Democracy::reap_preimage( + Origin::signed(5), + set_balance_proposal_hash(2), + u32::MAX + )); + assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(6), 48); + assert_eq!(Balances::free_balance(5), 62); + }); +} + +#[test] +fn noting_imminent_preimage_for_free_should_work() { + new_test_ext_execute_with_cond(|operational| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 1, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_noop!( + if operational { + Democracy::note_imminent_preimage_operational( + Origin::signed(6), + set_balance_proposal(2), + ) + } else { + Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)) + }, + Error::::NotImminent + ); + + next_block(); + + // Now we're in the dispatch queue it's all good. 
+ assert_ok!(Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2))); + + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn reaping_imminent_preimage_should_fail() { + new_test_ext().execute_with(|| { + let h = set_balance_proposal_hash_and_note(2); + let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + next_block(); + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), h, u32::MAX), + Error::::Imminent + ); + }); +} + +#[test] +fn note_imminent_preimage_can_only_be_successful_once() { + new_test_ext().execute_with(|| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 1, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + next_block(); + + // First time works + assert_ok!(Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2))); + + // Second time fails + assert_noop!( + Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)), + Error::::DuplicatePreimage + ); + + // Fails from any user + assert_noop!( + Democracy::note_imminent_preimage(Origin::signed(5), set_balance_proposal(2)), + Error::::DuplicatePreimage + ); + }); +} diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index f48824dc95c5d..db06696ca5c95 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -22,9 +22,9 @@ use super::*; #[test] fn backing_for_should_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance(1, 2, 2)); - assert_ok!(propose_set_balance(1, 4, 4)); - assert_ok!(propose_set_balance(1, 3, 3)); + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + 
assert_ok!(propose_set_balance_and_note(1, 3, 3)); assert_eq!(Democracy::backing_for(0), Some(2)); assert_eq!(Democracy::backing_for(1), Some(4)); assert_eq!(Democracy::backing_for(2), Some(3)); @@ -34,11 +34,11 @@ fn backing_for_should_work() { #[test] fn deposit_for_proposals_should_be_taken() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance(1, 2, 5)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); assert_eq!(Balances::free_balance(5), 35); @@ -48,11 +48,11 @@ fn deposit_for_proposals_should_be_taken() { #[test] fn deposit_for_proposals_should_be_returned() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance(1, 2, 5)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); fast_forward_to(3); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 20); @@ -77,21 +77,29 @@ fn poor_proposer_should_not_work() { #[test] fn 
poor_seconder_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance(2, 2, 11)); + assert_ok!(propose_set_balance_and_note(2, 2, 11)); assert_noop!( - Democracy::second(RuntimeOrigin::signed(1), 0), + Democracy::second(Origin::signed(1), 0, u32::MAX), BalancesError::::InsufficientBalance ); }); } +#[test] +fn invalid_seconds_upper_bound_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_noop!(Democracy::second(Origin::signed(2), 0, 0), Error::::WrongUpperBound); + }); +} + #[test] fn cancel_proposal_should_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance(1, 2, 2)); - assert_ok!(propose_set_balance(1, 4, 4)); - assert_noop!(Democracy::cancel_proposal(RuntimeOrigin::signed(1), 0), BadOrigin); - assert_ok!(Democracy::cancel_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_noop!(Democracy::cancel_proposal(Origin::signed(1), 0), BadOrigin); + assert_ok!(Democracy::cancel_proposal(Origin::root(), 0)); System::assert_last_event(crate::Event::ProposalCanceled { prop_index: 0 }.into()); assert_eq!(Democracy::backing_for(0), None); assert_eq!(Democracy::backing_for(1), Some(4)); @@ -102,24 +110,24 @@ fn cancel_proposal_should_work() { fn blacklisting_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - let hash = set_balance_proposal(2).hash(); + let hash = set_balance_proposal_hash(2); - assert_ok!(propose_set_balance(1, 2, 2)); - assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_noop!(Democracy::blacklist(RuntimeOrigin::signed(1), hash, None), BadOrigin); - assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); + assert_noop!(Democracy::blacklist(Origin::signed(1), hash.clone(), None), BadOrigin); + 
assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); assert_eq!(Democracy::backing_for(0), None); assert_eq!(Democracy::backing_for(1), Some(4)); - assert_noop!(propose_set_balance(1, 2, 2), Error::::ProposalBlacklisted); + assert_noop!(propose_set_balance_and_note(1, 2, 2), Error::::ProposalBlacklisted); fast_forward_to(2); - let hash = set_balance_proposal(4).hash(); + let hash = set_balance_proposal_hash(4); assert_ok!(Democracy::referendum_status(0)); - assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, Some(0))); + assert_ok!(Democracy::blacklist(Origin::root(), hash, Some(0))); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); }); } @@ -128,14 +136,14 @@ fn blacklisting_should_work() { fn runners_up_should_come_after() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 2)); - assert_ok!(propose_set_balance(1, 4, 4)); - assert_ok!(propose_set_balance(1, 3, 3)); + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance_and_note(1, 3, 3)); fast_forward_to(2); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); fast_forward_to(4); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 1, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), 1, aye(1))); fast_forward_to(6); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 2, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), 2, aye(1))); }); } diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index 5e133f38945d6..d28f24d76bb5b 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -24,11 +24,11 @@ fn simple_passing_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + 
set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); @@ -43,11 +43,11 @@ fn simple_failing_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); assert_eq!(tally(r), Tally { ayes: 0, nays: 1, turnout: 10 }); next_block(); @@ -62,28 +62,26 @@ fn ooo_inject_referendums_should_work() { new_test_ext().execute_with(|| { let r1 = Democracy::inject_referendum( 3, - set_balance_proposal(3), + set_balance_proposal_hash_and_note(3), VoteThreshold::SuperMajorityApprove, 0, ); let r2 = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r2, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); assert_eq!(tally(r2), Tally { ayes: 1, nays: 0, turnout: 10 }); next_block(); + assert_eq!(Balances::free_balance(42), 2); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r1, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); assert_eq!(tally(r1), Tally { ayes: 1, nays: 0, turnout: 10 }); - next_block(); - assert_eq!(Balances::free_balance(42), 2); - next_block(); assert_eq!(Balances::free_balance(42), 3); }); @@ -94,16 +92,16 @@ fn delayed_enactment_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 1, ); - 
assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(2))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, aye(4))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, aye(5))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, aye(6))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); + assert_ok!(Democracy::vote(Origin::signed(4), r, aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, aye(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, aye(6))); assert_eq!(tally(r), Tally { ayes: 21, nays: 0, turnout: 210 }); @@ -120,42 +118,39 @@ fn lowest_unbaked_should_be_sensible() { new_test_ext().execute_with(|| { let r1 = Democracy::inject_referendum( 3, - set_balance_proposal(1), + set_balance_proposal_hash_and_note(1), VoteThreshold::SuperMajorityApprove, 0, ); let r2 = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); let r3 = Democracy::inject_referendum( 10, - set_balance_proposal(3), + set_balance_proposal_hash_and_note(3), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r1, aye(1))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r2, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); // r3 is canceled - assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r3.into())); + assert_ok!(Democracy::cancel_referendum(Origin::root(), r3.into())); assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); - // r2 ends with approval - assert_eq!(Democracy::lowest_unbaked(), 0); - next_block(); - // r1 ends with approval - 
assert_eq!(Democracy::lowest_unbaked(), 3); - assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); - - // r2 is executed + // r2 is approved assert_eq!(Balances::free_balance(42), 2); + assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); - // r1 is executed + + // r1 is approved assert_eq!(Balances::free_balance(42), 1); + assert_eq!(Democracy::lowest_unbaked(), 3); + assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); }); } diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 482cd430e0e7f..d4fceaf0ee489 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -24,7 +24,7 @@ fn overvoting_should_fail() { new_test_ext().execute_with(|| { let r = begin_referendum(); assert_noop!( - Democracy::vote(RuntimeOrigin::signed(1), r, aye(2)), + Democracy::vote(Origin::signed(1), r, aye(2)), Error::::InsufficientFunds ); }); @@ -35,12 +35,9 @@ fn split_voting_should_work() { new_test_ext().execute_with(|| { let r = begin_referendum(); let v = AccountVote::Split { aye: 40, nay: 20 }; - assert_noop!( - Democracy::vote(RuntimeOrigin::signed(5), r, v), - Error::::InsufficientFunds - ); + assert_noop!(Democracy::vote(Origin::signed(5), r, v), Error::::InsufficientFunds); let v = AccountVote::Split { aye: 30, nay: 20 }; - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, v)); + assert_ok!(Democracy::vote(Origin::signed(5), r, v)); assert_eq!(tally(r), Tally { ayes: 3, nays: 2, turnout: 50 }); }); @@ -51,10 +48,10 @@ fn split_vote_cancellation_should_work() { new_test_ext().execute_with(|| { let r = begin_referendum(); let v = AccountVote::Split { aye: 30, nay: 20 }; - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, v)); - assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r)); + assert_ok!(Democracy::vote(Origin::signed(5), r, v)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); assert_eq!(tally(r), Tally { ayes: 0, nays: 0, 
turnout: 0 }); - assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -63,20 +60,20 @@ fn split_vote_cancellation_should_work() { fn single_proposal_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 1)); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); let r = 0; assert!(Democracy::referendum_info(r).is_none()); // start of 2 => next referendum scheduled. fast_forward_to(2); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(Democracy::referendum_count(), 1); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal: set_balance_proposal(2), + proposal_hash: set_balance_proposal_hash_and_note(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 1, nays: 0, turnout: 10 }, @@ -106,17 +103,17 @@ fn controversial_voting_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, big_aye(1))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, big_nay(2))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, big_nay(3))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, big_aye(4))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, big_aye(6))); + assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, big_nay(2))); + assert_ok!(Democracy::vote(Origin::signed(3), r, big_nay(3))); + assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, 
big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); assert_eq!(tally(r), Tally { ayes: 110, nays: 100, turnout: 210 }); @@ -132,12 +129,12 @@ fn controversial_low_turnout_voting_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, big_aye(6))); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); assert_eq!(tally(r), Tally { ayes: 60, nays: 50, turnout: 110 }); @@ -156,13 +153,13 @@ fn passing_low_turnout_voting_should_work() { let r = Democracy::inject_referendum( 2, - set_balance_proposal(2), + set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, big_aye(4))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, big_aye(6))); + assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); assert_eq!(tally(r), Tally { ayes: 100, nays: 50, turnout: 150 }); next_block(); diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs index 4b7f1a0fac45c..52ab8a40eb3e3 100644 --- a/frame/democracy/src/types.rs +++ b/frame/democracy/src/types.rs @@ -18,7 +18,7 @@ //! Miscellaneous additional datatypes. 
use crate::{AccountVote, Conviction, Vote, VoteThreshold}; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero}, @@ -26,7 +26,7 @@ use sp_runtime::{ }; /// Info regarding an ongoing referendum. -#[derive(Encode, MaxEncodedLen, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Tally { /// The number of aye votes, expressed in terms of post-conviction lock-vote. pub ayes: Balance, @@ -37,9 +37,7 @@ pub struct Tally { } /// Amount of votes and capital placed in delegation for an account. -#[derive( - Encode, MaxEncodedLen, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, -)] +#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Delegations { /// The number of votes (this is post-conviction). pub votes: Balance, @@ -162,12 +160,12 @@ impl< } /// Info regarding an ongoing referendum. -#[derive(Encode, MaxEncodedLen, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct ReferendumStatus { +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct ReferendumStatus { /// When voting on this referendum will end. pub end: BlockNumber, - /// The proposal being voted on. - pub proposal: Proposal, + /// The hash of the proposal being voted on. + pub proposal_hash: Hash, /// The thresholding mechanism to determine whether it passed. pub threshold: VoteThreshold, /// The delay (in blocks) to wait after a successful referendum before deploying. @@ -177,23 +175,23 @@ pub struct ReferendumStatus { } /// Info regarding a referendum, present or past. 
-#[derive(Encode, MaxEncodedLen, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub enum ReferendumInfo { +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub enum ReferendumInfo { /// Referendum is happening, the arg is the block number at which it will end. - Ongoing(ReferendumStatus), + Ongoing(ReferendumStatus), /// Referendum finished at `end`, and has been `approved` or rejected. Finished { approved: bool, end: BlockNumber }, } -impl ReferendumInfo { +impl ReferendumInfo { /// Create a new instance. pub fn new( end: BlockNumber, - proposal: Proposal, + proposal_hash: Hash, threshold: VoteThreshold, delay: BlockNumber, ) -> Self { - let s = ReferendumStatus { end, proposal, threshold, delay, tally: Tally::default() }; + let s = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Tally::default() }; ReferendumInfo::Ongoing(s) } } diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index 122f54febd8cf..c74623d4dfeb8 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -18,12 +18,11 @@ //! The vote datatype. use crate::{Conviction, Delegations, ReferendumIndex}; -use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen, Output}; -use frame_support::traits::Get; +use codec::{Decode, Encode, EncodeLike, Input, Output}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, - BoundedVec, RuntimeDebug, + RuntimeDebug, }; use sp_std::prelude::*; @@ -40,12 +39,6 @@ impl Encode for Vote { } } -impl MaxEncodedLen for Vote { - fn max_encoded_len() -> usize { - 1 - } -} - impl EncodeLike for Vote {} impl Decode for Vote { @@ -73,7 +66,7 @@ impl TypeInfo for Vote { } /// A vote for a referendum of a particular account. 
-#[derive(Encode, MaxEncodedLen, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum AccountVote { /// A standard vote, one-way (approve or reject) with a given amount of conviction. Standard { vote: Vote, balance: Balance }, @@ -114,18 +107,7 @@ impl AccountVote { /// A "prior" lock, i.e. a lock for some now-forgotten reason. #[derive( - Encode, - MaxEncodedLen, - Decode, - Default, - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - RuntimeDebug, - TypeInfo, + Encode, Decode, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, )] pub struct PriorLock(BlockNumber, Balance); @@ -149,15 +131,13 @@ impl PriorLock> { +#[derive(Encode, Decode, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub enum Voting { /// The account is voting directly. `delegations` is the total amount of post-conviction voting /// weight that it controls from those that have delegated to it. Direct { /// The current votes of the account. - votes: BoundedVec<(ReferendumIndex, AccountVote), MaxVotes>, + votes: Vec<(ReferendumIndex, AccountVote)>, /// The total amount of delegations that this account has received. delegations: Delegations, /// Any pre-existing locks from past voting/delegating activity. 
@@ -175,24 +155,20 @@ pub enum Voting> { }, } -impl> Default - for Voting +impl Default + for Voting { fn default() -> Self { Voting::Direct { - votes: Default::default(), + votes: Vec::new(), delegations: Default::default(), prior: PriorLock(Zero::zero(), Default::default()), } } } -impl< - Balance: Saturating + Ord + Zero + Copy, - BlockNumber: Ord + Copy + Zero, - AccountId, - MaxVotes: Get, - > Voting +impl + Voting { pub fn rejig(&mut self, now: BlockNumber) { match self { diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index e8ef91def9820..443d6b1166198 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -18,7 +18,7 @@ //! Voting thresholds. use crate::Tally; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -26,9 +26,7 @@ use sp_runtime::traits::{IntegerSquareRoot, Zero}; use sp_std::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. -#[derive( - Clone, Copy, PartialEq, Eq, Encode, MaxEncodedLen, Decode, sp_runtime::RuntimeDebug, TypeInfo, -)] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum VoteThreshold { /// A supermajority of approvals is needed to pass this vote. diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index db3969d400b97..45686b43f7152 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/democracy/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/democracy/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,23 +45,27 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_democracy. 
pub trait WeightInfo { fn propose() -> Weight; - fn second() -> Weight; - fn vote_new() -> Weight; - fn vote_existing() -> Weight; + fn second(s: u32, ) -> Weight; + fn vote_new(r: u32, ) -> Weight; + fn vote_existing(r: u32, ) -> Weight; fn emergency_cancel() -> Weight; - fn blacklist() -> Weight; - fn external_propose() -> Weight; + fn blacklist(p: u32, ) -> Weight; + fn external_propose(v: u32, ) -> Weight; fn external_propose_majority() -> Weight; fn external_propose_default() -> Weight; fn fast_track() -> Weight; - fn veto_external() -> Weight; - fn cancel_proposal() -> Weight; + fn veto_external(v: u32, ) -> Weight; + fn cancel_proposal(p: u32, ) -> Weight; fn cancel_referendum() -> Weight; + fn cancel_queued(r: u32, ) -> Weight; fn on_initialize_base(r: u32, ) -> Weight; fn on_initialize_base_with_launch_period(r: u32, ) -> Weight; fn delegate(r: u32, ) -> Weight; fn undelegate(r: u32, ) -> Weight; fn clear_public_proposals() -> Weight; + fn note_preimage(b: u32, ) -> Weight; + fn note_imminent_preimage(b: u32, ) -> Weight; + fn reap_preimage(b: u32, ) -> Weight; fn unlock_remove(r: u32, ) -> Weight; fn unlock_set(r: u32, ) -> Weight; fn remove_vote(r: u32, ) -> Weight; @@ -79,215 +80,233 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - // Minimum execution time: 56_868 nanoseconds. - Weight::from_ref_time(57_788_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (48_328_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second() -> Weight { - // Minimum execution time: 49_328 nanoseconds. 
- Weight::from_ref_time(49_764_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn second(s: u32, ) -> Weight { + (30_923_000 as Weight) + // Standard Error: 1_000 + .saturating_add((142_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new() -> Weight { - // Minimum execution time: 60_323 nanoseconds. - Weight::from_ref_time(61_389_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn vote_new(r: u32, ) -> Weight { + (40_345_000 as Weight) + // Standard Error: 1_000 + .saturating_add((140_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing() -> Weight { - // Minimum execution time: 60_612 nanoseconds. - Weight::from_ref_time(61_282_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn vote_existing(r: u32, ) -> Weight { + (39_853_000 as Weight) + // Standard Error: 1_000 + .saturating_add((150_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - // Minimum execution time: 24_780 nanoseconds. 
- Weight::from_ref_time(25_194_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (19_364_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Democracy PublicProps (r:1 w:1) - // Storage: Democracy DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) - fn blacklist() -> Weight { - // Minimum execution time: 85_177 nanoseconds. - Weight::from_ref_time(91_733_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn blacklist(p: u32, ) -> Weight { + (57_708_000 as Weight) + // Standard Error: 4_000 + .saturating_add((192_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose() -> Weight { - // Minimum execution time: 19_483 nanoseconds. - Weight::from_ref_time(19_914_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn external_propose(v: u32, ) -> Weight { + (10_714_000 as Weight) + // Standard Error: 0 + .saturating_add((33_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - // Minimum execution time: 4_963 nanoseconds. 
- Weight::from_ref_time(5_250_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_697_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - // Minimum execution time: 5_075 nanoseconds. - Weight::from_ref_time(5_187_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_831_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - // Minimum execution time: 23_956 nanoseconds. - Weight::from_ref_time(24_814_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (20_271_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external() -> Weight { - // Minimum execution time: 31_472 nanoseconds. - Weight::from_ref_time(31_770_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + fn veto_external(v: u32, ) -> Weight { + (21_319_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal() -> Weight { - // Minimum execution time: 73_811 nanoseconds. 
- Weight::from_ref_time(78_943_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn cancel_proposal(p: u32, ) -> Weight { + (43_960_000 as Weight) + // Standard Error: 2_000 + .saturating_add((184_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - // Minimum execution time: 16_074 nanoseconds. - Weight::from_ref_time(16_409_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (13_475_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn cancel_queued(r: u32, ) -> Weight { + (24_320_000 as Weight) + // Standard Error: 1_000 + .saturating_add((560_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:2 w:0) - /// The range of component `r` is `[0, 99]`. + // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - // Minimum execution time: 7_430 nanoseconds. 
- Weight::from_ref_time(12_086_064 as u64) - // Standard Error: 3_474 - .saturating_add(Weight::from_ref_time(2_283_457 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_428_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_171_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy LastTabledWasExternal (r:1 w:0) // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:2 w:0) - /// The range of component `r` is `[0, 99]`. + // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - // Minimum execution time: 9_882 nanoseconds. 
- Weight::from_ref_time(14_566_711 as u64) - // Standard Error: 3_354 - .saturating_add(Weight::from_ref_time(2_282_038 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (7_867_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_177_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy VotingOf (r:3 w:3) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: Democracy ReferendumInfoOf (r:2 w:2) - /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - // Minimum execution time: 48_840 nanoseconds. - Weight::from_ref_time(56_403_092 as u64) - // Standard Error: 6_093 - .saturating_add(Weight::from_ref_time(3_344_243 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(r as u64))) + (37_902_000 as Weight) + // Standard Error: 4_000 + .saturating_add((4_335_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } // Storage: Democracy VotingOf (r:2 w:2) - // Storage: Democracy ReferendumInfoOf (r:2 w:2) - /// The range of component `r` is `[0, 99]`. 
+ // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - // Minimum execution time: 30_483 nanoseconds. - Weight::from_ref_time(32_035_405 as u64) - // Standard Error: 4_383 - .saturating_add(Weight::from_ref_time(3_347_667 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(r as u64))) + (21_272_000 as Weight) + // Standard Error: 3_000 + .saturating_add((4_351_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - // Minimum execution time: 6_421 nanoseconds. 
- Weight::from_ref_time(6_638_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (4_913_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_preimage(b: u32, ) -> Weight { + (27_986_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_imminent_preimage(b: u32, ) -> Weight { + (20_058_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + // Storage: System Account (r:1 w:0) + fn reap_preimage(b: u32, ) -> Weight { + (28_619_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - // Minimum execution time: 30_291 nanoseconds. 
- Weight::from_ref_time(37_071_950 as u64) - // Standard Error: 1_619 - .saturating_add(Weight::from_ref_time(59_302 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (26_619_000 as Weight) + // Standard Error: 1_000 + .saturating_add((56_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - // Minimum execution time: 34_888 nanoseconds. - Weight::from_ref_time(36_418_789 as u64) - // Standard Error: 906 - .saturating_add(Weight::from_ref_time(109_602 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (25_373_000 as Weight) + // Standard Error: 1_000 + .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { - // Minimum execution time: 18_739 nanoseconds. 
- Weight::from_ref_time(21_004_077 as u64) - // Standard Error: 1_075 - .saturating_add(Weight::from_ref_time(116_457 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (15_961_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - // Minimum execution time: 18_514 nanoseconds. - Weight::from_ref_time(21_030_667 as u64) - // Standard Error: 1_102 - .saturating_add(Weight::from_ref_time(118_039 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (15_992_000 as Weight) + // Standard Error: 1_000 + .saturating_add((113_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -298,214 +317,232 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - // Minimum execution time: 56_868 nanoseconds. - Weight::from_ref_time(57_788_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (48_328_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second() -> Weight { - // Minimum execution time: 49_328 nanoseconds. 
- Weight::from_ref_time(49_764_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn second(s: u32, ) -> Weight { + (30_923_000 as Weight) + // Standard Error: 1_000 + .saturating_add((142_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new() -> Weight { - // Minimum execution time: 60_323 nanoseconds. - Weight::from_ref_time(61_389_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn vote_new(r: u32, ) -> Weight { + (40_345_000 as Weight) + // Standard Error: 1_000 + .saturating_add((140_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing() -> Weight { - // Minimum execution time: 60_612 nanoseconds. - Weight::from_ref_time(61_282_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn vote_existing(r: u32, ) -> Weight { + (39_853_000 as Weight) + // Standard Error: 1_000 + .saturating_add((150_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - // Minimum execution time: 24_780 nanoseconds. 
- Weight::from_ref_time(25_194_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (19_364_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Democracy PublicProps (r:1 w:1) - // Storage: Democracy DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) - fn blacklist() -> Weight { - // Minimum execution time: 85_177 nanoseconds. - Weight::from_ref_time(91_733_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn blacklist(p: u32, ) -> Weight { + (57_708_000 as Weight) + // Standard Error: 4_000 + .saturating_add((192_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose() -> Weight { - // Minimum execution time: 19_483 nanoseconds. - Weight::from_ref_time(19_914_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn external_propose(v: u32, ) -> Weight { + (10_714_000 as Weight) + // Standard Error: 0 + .saturating_add((33_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - // Minimum execution time: 4_963 nanoseconds. 
- Weight::from_ref_time(5_250_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_697_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - // Minimum execution time: 5_075 nanoseconds. - Weight::from_ref_time(5_187_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_831_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - // Minimum execution time: 23_956 nanoseconds. - Weight::from_ref_time(24_814_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (20_271_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external() -> Weight { - // Minimum execution time: 31_472 nanoseconds. - Weight::from_ref_time(31_770_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + fn veto_external(v: u32, ) -> Weight { + (21_319_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal() -> Weight { - // Minimum execution time: 73_811 nanoseconds. 
- Weight::from_ref_time(78_943_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn cancel_proposal(p: u32, ) -> Weight { + (43_960_000 as Weight) + // Standard Error: 2_000 + .saturating_add((184_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - // Minimum execution time: 16_074 nanoseconds. - Weight::from_ref_time(16_409_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (13_475_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn cancel_queued(r: u32, ) -> Weight { + (24_320_000 as Weight) + // Standard Error: 1_000 + .saturating_add((560_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:2 w:0) - /// The range of component `r` is `[0, 99]`. + // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - // Minimum execution time: 7_430 nanoseconds. 
- Weight::from_ref_time(12_086_064 as u64) - // Standard Error: 3_474 - .saturating_add(Weight::from_ref_time(2_283_457 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_428_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_171_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy LastTabledWasExternal (r:1 w:0) // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:2 w:0) - /// The range of component `r` is `[0, 99]`. + // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - // Minimum execution time: 9_882 nanoseconds. 
- Weight::from_ref_time(14_566_711 as u64) - // Standard Error: 3_354 - .saturating_add(Weight::from_ref_time(2_282_038 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (7_867_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_177_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy VotingOf (r:3 w:3) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: Democracy ReferendumInfoOf (r:2 w:2) - /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - // Minimum execution time: 48_840 nanoseconds. - Weight::from_ref_time(56_403_092 as u64) - // Standard Error: 6_093 - .saturating_add(Weight::from_ref_time(3_344_243 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(r as u64))) + (37_902_000 as Weight) + // Standard Error: 4_000 + .saturating_add((4_335_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } // Storage: Democracy VotingOf (r:2 w:2) - // Storage: Democracy ReferendumInfoOf (r:2 w:2) - /// The range of component `r` is `[0, 99]`. 
+ // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - // Minimum execution time: 30_483 nanoseconds. - Weight::from_ref_time(32_035_405 as u64) - // Standard Error: 4_383 - .saturating_add(Weight::from_ref_time(3_347_667 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(r as u64))) + (21_272_000 as Weight) + // Standard Error: 3_000 + .saturating_add((4_351_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - // Minimum execution time: 6_421 nanoseconds. 
- Weight::from_ref_time(6_638_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (4_913_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_preimage(b: u32, ) -> Weight { + (27_986_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_imminent_preimage(b: u32, ) -> Weight { + (20_058_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + // Storage: System Account (r:1 w:0) + fn reap_preimage(b: u32, ) -> Weight { + (28_619_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - // Minimum execution time: 30_291 nanoseconds. 
- Weight::from_ref_time(37_071_950 as u64) - // Standard Error: 1_619 - .saturating_add(Weight::from_ref_time(59_302 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (26_619_000 as Weight) + // Standard Error: 1_000 + .saturating_add((56_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - // Minimum execution time: 34_888 nanoseconds. - Weight::from_ref_time(36_418_789 as u64) - // Standard Error: 906 - .saturating_add(Weight::from_ref_time(109_602 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (25_373_000 as Weight) + // Standard Error: 1_000 + .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { - // Minimum execution time: 18_739 nanoseconds. 
- Weight::from_ref_time(21_004_077 as u64) - // Standard Error: 1_075 - .saturating_add(Weight::from_ref_time(116_457 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (15_961_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) - /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - // Minimum execution time: 18_514 nanoseconds. - Weight::from_ref_time(21_030_667 as u64) - // Standard Error: 1_102 - .saturating_add(Weight::from_ref_time(118_039 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (15_992_000 as Weight) + // Standard Error: 1_000 + .saturating_add((113_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/dex/rpc/src/lib.rs b/frame/dex/rpc/src/lib.rs index 17df74e305a43..4073e5bd7c55e 100644 --- a/frame/dex/rpc/src/lib.rs +++ b/frame/dex/rpc/src/lib.rs @@ -21,7 +21,7 @@ use std::{marker::PhantomData, sync::Arc}; use codec::Codec; use jsonrpsee::{ - core::RpcResult, + core::{async_trait, RpcResult}, proc_macros::rpc, types::error::{CallError, ErrorObject}, }; diff --git a/frame/dex/src/lib.rs b/frame/dex/src/lib.rs index 144d341492648..8bdab0ea54b3b 100644 --- a/frame/dex/src/lib.rs +++ b/frame/dex/src/lib.rs @@ -61,7 +61,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; type Currency: ReservableCurrency; diff 
--git a/frame/dex/src/mock.rs b/frame/dex/src/mock.rs index 1b66bdd61df1f..9678084581fe8 100644 --- a/frame/dex/src/mock.rs +++ b/frame/dex/src/mock.rs @@ -19,11 +19,12 @@ use super::*; use crate as pallet_dex; + use frame_support::{ construct_runtime, instances::{Instance1, Instance2}, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, + traits::{ConstU32, ConstU64}, PalletId, }; use sp_core::H256; @@ -32,7 +33,6 @@ use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -use sp_std::prelude::*; use std::sync::Arc; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -56,8 +56,8 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -65,7 +65,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -82,7 +82,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -92,7 +92,7 @@ impl pallet_balances::Config for Test { } impl pallet_assets::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Balance = u64; type AssetId = u32; type Currency = Balances; @@ -106,13 +106,11 @@ impl pallet_assets::Config for Test { type Freezer = (); type WeightInfo = (); type Extra = (); - type CreateOrigin = AsEnsureOriginWithArg< - frame_system::EnsureSigned<::AccountId>, - >; } +//TODO: limit creation only to dex pallet 
impl pallet_assets::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Balance = u64; type AssetId = u32; type Currency = Balances; @@ -126,9 +124,6 @@ impl pallet_assets::Config for Test { type Freezer = (); type WeightInfo = (); type Extra = (); - type CreateOrigin = AsEnsureOriginWithArg< - frame_system::EnsureSigned<::AccountId>, - >; } parameter_types! { @@ -136,7 +131,7 @@ parameter_types! { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type AssetBalance = ::Balance; type Assets = Assets; diff --git a/frame/dex/src/tests.rs b/frame/dex/src/tests.rs index 9687baf5c70dc..29b9c09042bbe 100644 --- a/frame/dex/src/tests.rs +++ b/frame/dex/src/tests.rs @@ -26,7 +26,7 @@ fn events() -> Vec> { let result = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let mock::RuntimeEvent::Dex(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let mock::Event::Dex(inner) = e { Some(inner) } else { None }) .collect(); System::reset_events(); @@ -58,7 +58,7 @@ fn pool_assets() -> Vec { fn create_tokens(owner: u64, tokens: Vec) { for token_id in tokens { - assert_ok!(Assets::force_create(RuntimeOrigin::root(), token_id, owner, true, 1)); + assert_ok!(Assets::force_create(Origin::root(), token_id, owner, true, 1)); } } @@ -87,7 +87,7 @@ fn create_pool_should_work() { create_tokens(user, vec![token_1, token_2]); - assert_ok!(Dex::create_pool(RuntimeOrigin::signed(user), token_2, token_1, lp_token)); + assert_ok!(Dex::create_pool(Origin::signed(user), token_2, token_1, lp_token)); assert_eq!(events(), [Event::::PoolCreated { creator: user, pool_id, lp_token }]); assert_eq!(pools(), vec![pool_id]); @@ -107,13 +107,13 @@ fn add_liquidity_should_work() { topup_pallet(); create_tokens(user, vec![token_1, token_2]); - assert_ok!(Dex::create_pool(RuntimeOrigin::signed(user), token_1, token_2, lp_token)); + assert_ok!(Dex::create_pool(Origin::signed(user), token_1, 
token_2, lp_token)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_1, user, 1000)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_2, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_1, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_2, user, 1000)); assert_ok!(Dex::add_liquidity( - RuntimeOrigin::signed(user), + Origin::signed(user), token_1, token_2, 10, @@ -152,13 +152,13 @@ fn remove_liquidity_should_work() { topup_pallet(); create_tokens(user, vec![token_1, token_2]); - assert_ok!(Dex::create_pool(RuntimeOrigin::signed(user), token_1, token_2, lp_token)); + assert_ok!(Dex::create_pool(Origin::signed(user), token_1, token_2, lp_token)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_1, user, 1000)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_2, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_1, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_2, user, 1000)); assert_ok!(Dex::add_liquidity( - RuntimeOrigin::signed(user), + Origin::signed(user), token_1, token_2, 10, @@ -169,16 +169,7 @@ fn remove_liquidity_should_work() { 2 )); - assert_ok!(Dex::remove_liquidity( - RuntimeOrigin::signed(user), - token_1, - token_2, - 9, - 0, - 0, - user, - 2 - )); + assert_ok!(Dex::remove_liquidity(Origin::signed(user), token_1, token_2, 9, 0, 0, user, 2)); assert!(events().contains(&Event::::LiquidityRemoved { who: user, @@ -211,13 +202,13 @@ fn quote_price_should_work() { topup_pallet(); create_tokens(user, vec![token_1, token_2]); - assert_ok!(Dex::create_pool(RuntimeOrigin::signed(user), token_1, token_2, lp_token)); + assert_ok!(Dex::create_pool(Origin::signed(user), token_1, token_2, lp_token)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_1, user, 1000)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_2, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_1, user, 1000)); + 
assert_ok!(Assets::mint(Origin::signed(user), token_2, user, 1000)); assert_ok!(Dex::add_liquidity( - RuntimeOrigin::signed(user), + Origin::signed(user), token_1, token_2, 1000, @@ -242,15 +233,15 @@ fn swap_should_work() { topup_pallet(); create_tokens(user, vec![token_1, token_2]); - assert_ok!(Dex::create_pool(RuntimeOrigin::signed(user), token_1, token_2, lp_token)); + assert_ok!(Dex::create_pool(Origin::signed(user), token_1, token_2, lp_token)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_1, user, 1000)); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_2, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_1, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_2, user, 1000)); let liquidity1 = 1000; let liquidity2 = 20; assert_ok!(Dex::add_liquidity( - RuntimeOrigin::signed(user), + Origin::signed(user), token_1, token_2, liquidity1, @@ -265,7 +256,7 @@ fn swap_should_work() { let exchange_amount = 10; assert_ok!(Dex::swap_exact_tokens_for_tokens( - RuntimeOrigin::signed(user), + Origin::signed(user), token_2, token_1, exchange_amount, @@ -293,17 +284,17 @@ fn same_asset_swap_should_fail() { create_tokens(user, vec![token_1]); assert_noop!( - Dex::create_pool(RuntimeOrigin::signed(user), token_1, token_1, lp_token), + Dex::create_pool(Origin::signed(user), token_1, token_1, lp_token), Error::::EqualAssets ); - assert_ok!(Assets::mint(RuntimeOrigin::signed(user), token_1, user, 1000)); + assert_ok!(Assets::mint(Origin::signed(user), token_1, user, 1000)); let liquidity1 = 1000; let liquidity2 = 20; assert_noop!( Dex::add_liquidity( - RuntimeOrigin::signed(user), + Origin::signed(user), token_1, token_1, liquidity1, @@ -319,7 +310,7 @@ fn same_asset_swap_should_fail() { let exchange_amount = 10; assert_noop!( Dex::swap_exact_tokens_for_tokens( - RuntimeOrigin::signed(user), + Origin::signed(user), token_1, token_1, exchange_amount, diff --git a/frame/election-provider-multi-phase/Cargo.toml 
b/frame/election-provider-multi-phase/Cargo.toml index ca94fef6a4356..bbb0adf02e366 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -40,7 +40,7 @@ rand = { version = "0.7.3", default-features = false, features = ["alloc", "smal strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } [dev-dependencies] -parking_lot = "0.12.1" +parking_lot = "0.12.0" rand = { version = "0.7.3" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } @@ -52,7 +52,6 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } [features] default = ["std"] std = [ - "pallet-election-provider-support-benchmarking?/std", "codec/std", "scale-info/std", "log/std", @@ -69,7 +68,7 @@ std = [ "frame-election-provider-support/std", "log/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "rand/std", "strum/std", ] diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 10041f6aec07c..a8195df7305ff 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -220,7 +220,11 @@ frame_benchmarking::benchmarks! { let receiver = account("receiver", 0, SEED); let initial_balance = T::Currency::minimum_balance() * 10u32.into(); T::Currency::make_free_balance_be(&receiver, initial_balance); - let ready = Default::default(); + let ready = ReadySolution { + supports: vec![], + score: Default::default(), + compute: Default::default() + }; let deposit: BalanceOf = 10u32.into(); let reward: BalanceOf = T::SignedRewardBase::get(); @@ -316,14 +320,21 @@ frame_benchmarking::benchmarks! { } submit { - // the queue is full and the solution is only better than the worse. 
+ // the solution will be worse than all of them meaning the score need to be checked against + // ~ log2(c) + let solution = RawSolution { + score: ElectionScore { minimal_stake: 10_000_000u128 - 1, ..Default::default() }, + ..Default::default() + }; + >::create_snapshot().map_err(<&str>::from)?; MultiPhase::::on_initialize_open_signed(); >::put(1); let mut signed_submissions = SignedSubmissions::::get(); - // Insert `max` submissions + // Insert `max - 1` submissions because the call to `submit` will insert another + // submission and the score is worse then the previous scores. for i in 0..(T::SignedMaxSubmissions::get() - 1) { let raw_solution = RawSolution { score: ElectionScore { minimal_stake: 10_000_000u128 + (i as u128), ..Default::default() }, @@ -339,12 +350,6 @@ frame_benchmarking::benchmarks! { } signed_submissions.put(); - // this score will eject the weakest one. - let solution = RawSolution { - score: ElectionScore { minimal_stake: 10_000_000u128 + 1, ..Default::default() }, - ..Default::default() - }; - let caller = frame_benchmarking::whitelisted_caller(); let deposit = MultiPhase::::deposit_for( &solution, @@ -399,7 +404,7 @@ frame_benchmarking::benchmarks! { assert_eq!(raw_solution.solution.voter_count() as u32, a); assert_eq!(raw_solution.solution.unique_targets().len() as u32, d); }: { - assert!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned).is_ok()); + assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); } // NOTE: this weight is not used anywhere, but the fact that it should succeed when execution in diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index bc19e5143424c..e1d3cb8ed5dee 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -114,8 +114,8 @@ //! If we reach the end of both phases (i.e. call to [`ElectionProvider::elect`] happens) and no //! 
good solution is queued, then the fallback strategy [`pallet::Config::Fallback`] is used to //! determine what needs to be done. The on-chain election is slow, and contains no balancing or -//! reduction post-processing. If [`pallet::Config::Fallback`] fails, the next phase -//! [`Phase::Emergency`] is enabled, which is a more *fail-safe* approach. +//! reduction post-processing. [`NoFallback`] does nothing and enables [`Phase::Emergency`], which +//! is a more *fail-safe* approach. //! //! ### Emergency Phase //! @@ -231,25 +231,22 @@ use codec::{Decode, Encode}; use frame_election_provider_support::{ - BoundedSupportsOf, ElectionDataProvider, ElectionProvider, ElectionProviderBase, - InstantElectionProvider, NposSolution, + ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, }; use frame_support::{ - dispatch::DispatchClass, + dispatch::DispatchResultWithPostInfo, ensure, traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, - weights::Weight, - DefaultNoBound, EqNoBound, PartialEqNoBound, + weights::{DispatchClass, Weight}, }; use frame_system::{ensure_none, offchain::SendTransactionTypes}; use scale_info::TypeInfo; use sp_arithmetic::{ - traits::{CheckedAdd, Zero}, + traits::{Bounded, CheckedAdd, Zero}, UpperOf, }; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, BoundedSupports, ElectionScore, EvaluateSupport, - Supports, VoteWeight, + assignment_ratio_to_staked_normalized, ElectionScore, EvaluateSupport, Supports, VoteWeight, }; use sp_runtime::{ transaction_validity::{ @@ -269,7 +266,6 @@ pub mod helpers; const LOG_TARGET: &str = "runtime::election-provider"; -pub mod migrations; pub mod signed; pub mod unsigned; pub mod weights; @@ -293,7 +289,7 @@ pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex pub type SolutionAccuracyOf = ::MinerConfig> as NposSolution>::Accuracy; /// The fallback election type. 
-pub type FallbackErrorOf = <::Fallback as ElectionProviderBase>::Error; +pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; /// Configuration for the benchmarks of the pallet. pub trait BenchmarkingConfig { @@ -313,6 +309,27 @@ pub trait BenchmarkingConfig { const MAXIMUM_TARGETS: u32; } +/// A fallback implementation that transitions the pallet to the emergency phase. +pub struct NoFallback(sp_std::marker::PhantomData); + +impl ElectionProvider for NoFallback { + type AccountId = T::AccountId; + type BlockNumber = T::BlockNumber; + type DataProvider = T::DataProvider; + type Error = &'static str; + + fn elect() -> Result, Self::Error> { + // Do nothing, this will enable the emergency phase. + Err("NoFallback.") + } +} + +impl InstantElectionProvider for NoFallback { + fn elect_with_bounds(_: usize, _: usize) -> Result, Self::Error> { + Err("NoFallback.") + } +} + /// Current phase of the pallet. #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] pub enum Phase { @@ -420,23 +437,13 @@ impl Default for RawSolution { } /// A checked solution, ready to be enacted. -#[derive( - PartialEqNoBound, - EqNoBound, - Clone, - Encode, - Decode, - RuntimeDebug, - DefaultNoBound, - scale_info::TypeInfo, -)] -#[scale_info(skip_type_params(T))] -pub struct ReadySolution { +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] +pub struct ReadySolution { /// The final supports of the solution. /// /// This is target-major vector, storing each winners, total backing, and each individual /// backer. - pub supports: BoundedSupports, + pub supports: Supports, /// The score of the solution. /// /// This is needed to potentially challenge the solution. @@ -450,6 +457,7 @@ pub struct ReadySolution { /// /// These are stored together because they are often accessed together. 
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] +#[codec(mel_bound())] #[scale_info(skip_type_params(T))] pub struct RoundSnapshot { /// All of the voters. @@ -487,8 +495,6 @@ pub enum ElectionError { DataProvider(&'static str), /// An error nested in the fallback. Fallback(FallbackErrorOf), - /// No solution has been queued. - NothingQueued, } // NOTE: we have to do this manually because of the additional where clause needed on @@ -545,8 +551,6 @@ pub enum FeasibilityError { InvalidRound, /// Comparison against `MinimumUntrustedScore` failed. UntrustedScoreTooLow, - /// Data Provider returned too many desired targets - TooManyDesiredTargets, } impl From for FeasibilityError { @@ -560,16 +564,13 @@ pub use pallet::*; pub mod pallet { use super::*; use frame_election_provider_support::{InstantElectionProvider, NposSolver}; - use frame_support::{ - pallet_prelude::*, - traits::{DefensiveResult, EstimateCallFee}, - }; + use frame_support::{pallet_prelude::*, traits::EstimateCallFee}; use frame_system::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config + SendTransactionTypes> { - type RuntimeEvent: From> - + IsType<::RuntimeEvent> + type Event: From> + + IsType<::Event> + TryInto>; /// Currency type. @@ -663,13 +664,6 @@ pub mod pallet { #[pallet::constant] type MaxElectableTargets: Get>; - /// The maximum number of winners that can be elected by this `ElectionProvider` - /// implementation. - /// - /// Note: This must always be greater or equal to `T::DataProvider::desired_targets()`. - #[pallet::constant] - type MaxWinners: Get; - /// Handler for the slashed deposits. type SlashHandler: OnUnbalanced>; @@ -687,7 +681,6 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Self::DataProvider, - MaxWinners = Self::MaxWinners, >; /// Configuration of the governance-only fallback. 
@@ -698,7 +691,6 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Self::DataProvider, - MaxWinners = Self::MaxWinners, >; /// OCW election solution miner algorithm implementation. @@ -706,7 +698,7 @@ pub mod pallet { /// Origin that can control this pallet. Note that any action taken by this origin (such) /// as providing an emergency solution is not checked. Thus, it must be a trusted origin. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The configuration of benchmarking. type BenchmarkingConfig: BenchmarkingConfig; @@ -715,25 +707,6 @@ pub mod pallet { type WeightInfo: WeightInfo; } - // Expose miner configs over the metadata such that they can be re-implemented. - #[pallet::extra_constants] - impl Pallet { - #[pallet::constant_name(MinerMaxLength)] - fn max_length() -> u32 { - ::MaxLength::get() - } - - #[pallet::constant_name(MinerMaxWeight)] - fn max_weight() -> Weight { - ::MaxWeight::get() - } - - #[pallet::constant_name(MinerMaxVotesPerVoter)] - fn max_votes_per_voter() -> u32 { - ::MaxVotesPerVoter::get() - } - } - #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(now: T::BlockNumber) -> Weight { @@ -904,7 +877,7 @@ pub mod pallet { origin: OriginFor, raw_solution: Box>>, witness: SolutionOrSnapshotSize, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { ensure_none(origin)?; let error_message = "Invalid unsigned submission must produce invalid block and \ deprive validator from their authoring reward."; @@ -928,11 +901,11 @@ pub mod pallet { let ejected_a_solution = >::exists(); >::put(ready); Self::deposit_event(Event::SolutionStored { - compute: ElectionCompute::Unsigned, + election_compute: ElectionCompute::Unsigned, prev_ejected: ejected_a_solution, }); - Ok(()) + Ok(None.into()) } /// Set a new value for `MinimumUntrustedScore`. 
@@ -966,11 +939,9 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); - // bound supports with T::MaxWinners - let supports = supports.try_into().map_err(|_| Error::::TooManyWinners)?; - // Note: we don't `rotate_round` at this point; the next call to // `ElectionProvider::elect` will succeed and take care of that. + let solution = ReadySolution { supports, score: Default::default(), @@ -978,7 +949,7 @@ pub mod pallet { }; Self::deposit_event(Event::SolutionStored { - compute: ElectionCompute::Emergency, + election_compute: ElectionCompute::Emergency, prev_ejected: QueuedSolution::::exists(), }); @@ -1013,7 +984,7 @@ pub mod pallet { let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; ensure!( - Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), + Self::solution_weight_of(&raw_solution, size) < T::SignedMaxWeight::get(), Error::::SignedTooMuchWeight, ); @@ -1021,7 +992,7 @@ pub mod pallet { let deposit = Self::deposit_for(&raw_solution, size); let call_fee = { let call = Call::submit { raw_solution: raw_solution.clone() }; - T::EstimateCallFee::estimate_call_fee(&call, None::.into()) + T::EstimateCallFee::estimate_call_fee(&call, None.into()) }; let submission = SignedSubmission { @@ -1054,7 +1025,7 @@ pub mod pallet { signed_submissions.put(); Self::deposit_event(Event::SolutionStored { - compute: ElectionCompute::Signed, + election_compute: ElectionCompute::Signed, prev_ejected: ejected_a_solution, }); Ok(()) @@ -1073,20 +1044,17 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); - let supports = - T::GovernanceFallback::instant_elect(maybe_max_voters, maybe_max_targets).map_err( - |e| { - log!(error, "GovernanceFallback failed: {:?}", e); - Error::::FallbackFailed - }, - )?; + let maybe_max_voters = maybe_max_voters.map(|x| x as usize); + let maybe_max_targets 
= maybe_max_targets.map(|x| x as usize); - // transform BoundedVec<_, T::GovernanceFallback::MaxWinners> into - // `BoundedVec<_, T::MaxWinners>` - let supports: BoundedVec<_, T::MaxWinners> = supports - .into_inner() - .try_into() - .defensive_map_err(|_| Error::::BoundNotMet)?; + let supports = T::GovernanceFallback::elect_with_bounds( + maybe_max_voters.unwrap_or(Bounded::max_value()), + maybe_max_targets.unwrap_or(Bounded::max_value()), + ) + .map_err(|e| { + log!(error, "GovernanceFallback failed: {:?}", e); + Error::::FallbackFailed + })?; let solution = ReadySolution { supports, @@ -1095,7 +1063,7 @@ pub mod pallet { }; Self::deposit_event(Event::SolutionStored { - compute: ElectionCompute::Fallback, + election_compute: ElectionCompute::Fallback, prev_ejected: QueuedSolution::::exists(), }); @@ -1113,13 +1081,10 @@ pub mod pallet { /// solution is unsigned, this means that it has also been processed. /// /// The `bool` is `true` when a previous solution was ejected to make room for this one. - SolutionStored { compute: ElectionCompute, prev_ejected: bool }, - /// The election has been finalized, with the given computation and score. - ElectionFinalized { compute: ElectionCompute, score: ElectionScore }, - /// An election failed. - /// - /// Not much can be said about which computes failed in the process. - ElectionFailed, + SolutionStored { election_compute: ElectionCompute, prev_ejected: bool }, + /// The election has been finalized, with `Some` of the given computation, or else if the + /// election failed, `None`. + ElectionFinalized { election_compute: Option }, /// An account has been rewarded for their signed submission being finalized. Rewarded { account: ::AccountId, value: BalanceOf }, /// An account has been slashed for submitting an invalid signed submission. 
@@ -1157,10 +1122,6 @@ pub mod pallet { CallNotAllowed, /// The fallback failed FallbackFailed, - /// Some bound not met - BoundNotMet, - /// Submitted solution has too many winners - TooManyWinners, } #[pallet::validate_unsigned] @@ -1234,7 +1195,7 @@ pub mod pallet { /// Current best solution, signed or unsigned, queued to be returned upon `elect`. #[pallet::storage] #[pallet::getter(fn queued_solution)] - pub type QueuedSolution = StorageValue<_, ReadySolution>; + pub type QueuedSolution = StorageValue<_, ReadySolution>; /// Snapshot data of the round. /// @@ -1273,8 +1234,8 @@ pub mod pallet { #[pallet::storage] pub type SignedSubmissionNextIndex = StorageValue<_, u32, ValueQuery>; - /// A sorted, bounded vector of `(score, block_number, index)`, where each `index` points to a - /// value in `SignedSubmissions`. + /// A sorted, bounded set of `(score, index)`, where each `index` points to a value in + /// `SignedSubmissions`. /// /// We never need to process more than a single signed submission at a time. Signed submissions /// can be quite large, so we're willing to pay the cost of multiple database accesses to access @@ -1304,14 +1265,9 @@ pub mod pallet { #[pallet::getter(fn minimum_untrusted_score)] pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; - /// The current storage version. 
- /// - /// v1: https://github.com/paritytech/substrate/pull/12237/ - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] - #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData); } @@ -1400,28 +1356,31 @@ impl Pallet { let targets = T::DataProvider::electable_targets(Some(target_limit)) .map_err(ElectionError::DataProvider)?; - let voters = T::DataProvider::electing_voters(Some(voter_limit)) .map_err(ElectionError::DataProvider)?; + let mut desired_targets = + T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; + // Defensive-only. if targets.len() > target_limit || voters.len() > voter_limit { + debug_assert!(false, "Snapshot limit has not been respected."); return Err(ElectionError::DataProvider("Snapshot too big for submission.")) } - let mut desired_targets = - T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; - - // If `desired_targets` > `targets.len()`, cap `desired_targets` to that - // level and emit a warning - let max_desired_targets: u32 = (targets.len() as u32).min(T::MaxWinners::get()); - if desired_targets > max_desired_targets { + // If `desired_targets` > `targets.len()`, cap `desired_targets` to that level and emit a + // warning + let max_len = targets + .len() + .try_into() + .map_err(|_| ElectionError::DataProvider("Failed to convert usize"))?; + if desired_targets > max_len { log!( warn, "desired_targets: {} > targets.len(): {}, capping desired_targets", desired_targets, - max_desired_targets + max_len ); - desired_targets = max_desired_targets; + desired_targets = max_len; } Ok((targets, voters, desired_targets)) @@ -1470,7 +1429,7 @@ impl Pallet { pub fn feasibility_check( raw_solution: RawSolution>, compute: ElectionCompute, - ) -> Result, FeasibilityError> { + ) -> Result, FeasibilityError> { let RawSolution { solution, score, round } = raw_solution; // First, 
check round. @@ -1483,11 +1442,6 @@ impl Pallet { Self::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; ensure!(winners.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount); - // Fail early if targets requested by data provider exceed maximum winners supported. - ensure!( - desired_targets <= ::MaxWinners::get(), - FeasibilityError::TooManyDesiredTargets - ); // Ensure that the solution's score can pass absolute min-score. let submitted_score = raw_solution.score; @@ -1548,8 +1502,6 @@ impl Pallet { let known_score = supports.evaluate(); ensure!(known_score == score, FeasibilityError::InvalidScore); - // Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. - let supports = supports.try_into().expect("checked desired_targets <= MaxWinners; qed"); Ok(ReadySolution { supports, compute, score }) } @@ -1569,7 +1521,7 @@ impl Pallet { Self::kill_snapshot(); } - fn do_elect() -> Result, ElectionError> { + fn do_elect() -> Result, ElectionError> { // We have to unconditionally try finalizing the signed phase here. There are only two // possibilities: // @@ -1579,27 +1531,23 @@ impl Pallet { // inexpensive (1 read of an empty vector). let _ = Self::finalize_signed_phase(); >::take() - .ok_or(ElectionError::::NothingQueued) - .or_else(|_| { - T::Fallback::instant_elect(None, None) - .map_err(|fe| ElectionError::Fallback(fe)) - .and_then(|supports| { - Ok(ReadySolution { - supports, - score: Default::default(), - compute: ElectionCompute::Fallback, - }) - }) - }) - .map(|ReadySolution { compute, score, supports }| { - Self::deposit_event(Event::ElectionFinalized { compute, score }); + .map_or_else( + || { + T::Fallback::elect() + .map_err(|fe| ElectionError::Fallback(fe)) + .map(|supports| (supports, ElectionCompute::Fallback)) + }, + |ReadySolution { supports, compute, .. 
}| Ok((supports, compute)), + ) + .map(|(supports, compute)| { + Self::deposit_event(Event::ElectionFinalized { election_compute: Some(compute) }); if Self::round() != 1 { log!(info, "Finalized election round with compute {:?}.", compute); } supports }) .map_err(|err| { - Self::deposit_event(Event::ElectionFailed); + Self::deposit_event(Event::ElectionFinalized { election_compute: None }); if Self::round() != 1 { log!(warn, "Failed to finalize election round. reason {:?}", err); } @@ -1618,23 +1566,13 @@ impl Pallet { } } -impl ElectionProviderBase for Pallet { +impl ElectionProvider for Pallet { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type Error = ElectionError; - type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; -} - -impl ElectionProvider for Pallet { - fn ongoing() -> bool { - match Self::current_phase() { - Phase::Off => false, - _ => true, - } - } - fn elect() -> Result, Self::Error> { + fn elect() -> Result, Self::Error> { match Self::do_elect() { Ok(supports) => { // All went okay, record the weight, put sign to be Off, clean snapshot, etc. 
@@ -1863,12 +1801,12 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, AccountId, - ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, - SignedMaxSubmissions, System, TargetIndex, Targets, + multi_phase_events, roll_to, AccountId, ExtBuilder, MockWeightInfo, MockedWeightInfo, + MultiPhase, Origin, Runtime, SignedMaxSubmissions, System, TargetIndex, Targets, }, Phase, }; + use frame_election_provider_support::ElectionProvider; use frame_support::{assert_noop, assert_ok}; use sp_npos_elections::{BalancingConfig, Support}; @@ -1888,7 +1826,7 @@ mod tests { assert!(MultiPhase::snapshot().is_none()); assert_eq!(MultiPhase::round(), 1); - roll_to_signed(); + roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert!(MultiPhase::snapshot().is_some()); @@ -1899,7 +1837,7 @@ mod tests { assert!(MultiPhase::snapshot().is_some()); assert_eq!(MultiPhase::round(), 1); - roll_to_unsigned(); + roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert_eq!( multi_phase_events(), @@ -1932,29 +1870,11 @@ mod tests { roll_to(44); assert!(MultiPhase::current_phase().is_off()); - roll_to_signed(); + roll_to(45); assert!(MultiPhase::current_phase().is_signed()); roll_to(55); assert!(MultiPhase::current_phase().is_unsigned_open_at(55)); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - }, - Event::SignedPhaseStarted { round: 2 }, - Event::UnsignedPhaseStarted { round: 2 } - ] - ); }) } @@ -1978,21 +1898,6 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); - - 
assert_eq!( - multi_phase_events(), - vec![ - Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - } - ] - ); }); } @@ -2005,7 +1910,7 @@ mod tests { roll_to(19); assert!(MultiPhase::current_phase().is_off()); - roll_to_signed(); + roll_to(20); assert!(MultiPhase::current_phase().is_signed()); assert!(MultiPhase::snapshot().is_some()); @@ -2016,21 +1921,6 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - } - ] - ) }); } @@ -2053,14 +1943,6 @@ mod tests { assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); - - assert_eq!( - multi_phase_events(), - vec![Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } - }] - ); }); } @@ -2069,13 +1951,16 @@ mod tests { // An early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // Signed phase started at block 15 and will end at 25. + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to_signed(); + roll_to(15); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); // An unexpected call to elect. + roll_to(20); assert_ok!(MultiPhase::elect()); // We surely can't have any feasible solutions. This will cause an on-chain election. 
@@ -2083,10 +1968,7 @@ mod tests { multi_phase_events(), vec![ Event::SignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: Default::default() - } + Event::ElectionFinalized { election_compute: Some(ElectionCompute::Fallback) } ], ); // All storage items must be cleared. @@ -2104,8 +1986,10 @@ mod tests { // an early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // signed phase started at block 15 and will end at 25. + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to_signed(); + roll_to(15); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); @@ -2116,13 +2000,11 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit( - crate::mock::RuntimeOrigin::signed(99), - Box::new(solution) - )); + assert_ok!(MultiPhase::submit(crate::mock::Origin::signed(99), Box::new(solution))); } // an unexpected call to elect. + roll_to(20); assert_ok!(MultiPhase::elect()); // all storage items must be cleared. 
@@ -2132,119 +2014,13 @@ mod tests { assert!(MultiPhase::desired_targets().is_none()); assert!(MultiPhase::queued_solution().is_none()); assert!(MultiPhase::signed_submissions().is_empty()); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Slashed { account: 99, value: 5 }, - Event::Slashed { account: 99, value: 5 }, - Event::Slashed { account: 99, value: 5 }, - Event::Slashed { account: 99, value: 5 }, - Event::Slashed { account: 99, value: 5 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - } - ] - ); - }) - } - - #[test] - fn check_events_with_compute_signed() { - ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); - assert!(MultiPhase::current_phase().is_signed()); - - let solution = raw_solution(); - assert_ok!(MultiPhase::submit( - crate::mock::RuntimeOrigin::signed(99), - Box::new(solution) - )); - - roll_to(30); - assert_ok!(MultiPhase::elect()); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Rewarded { account: 99, value: 7 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Signed, - score: ElectionScore { - minimal_stake: 40, - sum_stake: 100, - sum_stake_squared: 5200 - } - } - ], - ); - }) - } - - #[test] - fn check_events_with_compute_unsigned() { - ExtBuilder::default().build_and_execute(|| 
{ - roll_to_unsigned(); - assert!(MultiPhase::current_phase().is_unsigned()); - - // ensure we have snapshots in place. - assert!(MultiPhase::snapshot().is_some()); - assert_eq!(MultiPhase::desired_targets().unwrap(), 2); - - // mine seq_phragmen solution with 2 iters. - let (solution, witness) = MultiPhase::mine_solution().unwrap(); - - // ensure this solution is valid. - assert!(MultiPhase::queued_solution().is_none()); - assert_ok!(MultiPhase::submit_unsigned( - crate::mock::RuntimeOrigin::none(), - Box::new(solution), - witness - )); - assert!(MultiPhase::queued_solution().is_some()); - - assert_ok!(MultiPhase::elect()); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::SolutionStored { - compute: ElectionCompute::Unsigned, - prev_ejected: false - }, - Event::ElectionFinalized { - compute: ElectionCompute::Unsigned, - score: ElectionScore { - minimal_stake: 40, - sum_stake: 100, - sum_stake_squared: 5200 - } - } - ], - ); }) } #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far, but we get a result. 
@@ -2257,27 +2033,11 @@ mod tests { (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) ] - ); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - } - ] - ); + ) }); ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2285,22 +2045,13 @@ mod tests { assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. assert_eq!(MultiPhase::current_phase(), Phase::Emergency); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFailed - ] - ); }) } #[test] fn governance_fallback_works() { ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. 
@@ -2313,12 +2064,12 @@ mod tests { // no single account can trigger this assert_noop!( - MultiPhase::governance_fallback(RuntimeOrigin::signed(99), None, None), + MultiPhase::governance_fallback(Origin::signed(99), None, None), DispatchError::BadOrigin ); // only root can - assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root(), None, None)); + assert_ok!(MultiPhase::governance_fallback(Origin::root(), None, None)); // something is queued now assert!(MultiPhase::queued_solution().is_some()); // next election call with fix everything.; @@ -2330,15 +2081,12 @@ mod tests { vec![ Event::SignedPhaseStarted { round: 1 }, Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFailed, + Event::ElectionFinalized { election_compute: None }, Event::SolutionStored { - compute: ElectionCompute::Fallback, + election_compute: ElectionCompute::Fallback, prev_ejected: false }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: Default::default() - } + Event::ElectionFinalized { election_compute: Some(ElectionCompute::Fallback) } ] ); }) @@ -2359,16 +2107,9 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Off); // On-chain backup works though. + roll_to(29); let supports = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); - - assert_eq!( - multi_phase_events(), - vec![Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } - }] - ); }); } @@ -2392,8 +2133,6 @@ mod tests { let err = MultiPhase::elect().unwrap_err(); assert_eq!(err, ElectionError::Fallback("NoFallback.")); assert_eq!(MultiPhase::current_phase(), Phase::Emergency); - - assert_eq!(multi_phase_events(), vec![Event::ElectionFailed]); }); } @@ -2407,7 +2146,7 @@ mod tests { crate::mock::MaxElectingVoters::set(2); // Signed phase opens just fine. 
- roll_to_signed(); + roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!( @@ -2420,7 +2159,7 @@ mod tests { #[test] fn untrusted_score_verification_is_respected() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); // set the solution balancing to get the desired score. @@ -2465,8 +2204,8 @@ mod tests { }; let mut active = 1; - while weight_with(active) - .all_lte(::BlockWeights::get().max_block) || + while weight_with(active) <= + ::BlockWeights::get().max_block || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/migrations.rs b/frame/election-provider-multi-phase/src/migrations.rs deleted file mode 100644 index 77efe0d0c5e92..0000000000000 --- a/frame/election-provider-multi-phase/src/migrations.rs +++ /dev/null @@ -1,78 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub mod v1 { - use frame_support::{ - storage::unhashed, - traits::{Defensive, GetStorageVersion, OnRuntimeUpgrade}, - BoundedVec, - }; - use sp_std::collections::btree_map::BTreeMap; - - use crate::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { - fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - - log!( - info, - "Running migration with current storage version {:?} / onchain {:?}", - current, - onchain - ); - - if current == 1 && onchain == 0 { - if SignedSubmissionIndices::::exists() { - // This needs to be tested at a both a block height where this value exists, and - // when it doesn't. - let now = frame_system::Pallet::::block_number(); - let map = unhashed::get::>( - &SignedSubmissionIndices::::hashed_key(), - ) - .defensive_unwrap_or_default(); - let vector = map - .into_iter() - .map(|(score, index)| (score, now, index)) - .collect::>(); - - log!( - debug, - "{:?} SignedSubmissionIndices read from storage (max: {:?})", - vector.len(), - T::SignedMaxSubmissions::get() - ); - - // defensive-only, assuming a constant `SignedMaxSubmissions`. - let bounded = BoundedVec::<_, _>::truncate_from(vector); - SignedSubmissionIndices::::put(bounded); - - log!(info, "SignedSubmissionIndices existed and got migrated"); - } else { - log!(info, "SignedSubmissionIndices did NOT exist."); - } - - current.put::>(); - T::DbWeight::get().reads_writes(2, 1) - } else { - log!(info, "Migration did not execute. 
This probably should be removed"); - T::DbWeight::get().reads(1) - } - } - } -} diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 8ab7e5bbf733d..7eff70b47eba5 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -19,14 +19,14 @@ use super::*; use crate::{self as multi_phase, unsigned::MinerConfig}; use frame_election_provider_support::{ data_provider, - onchain::{self}, + onchain::{self, UnboundedExecution}, ElectionDataProvider, NposSolution, SequentialPhragmen, }; pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; use frame_support::{ bounded_vec, parameter_types, traits::{ConstU32, Hooks}, - weights::{constants, Weight}, + weights::Weight, BoundedVec, }; use multi_phase::unsigned::{IndexAssignmentOf, VoterOf}; @@ -50,8 +50,7 @@ use sp_runtime::{ use std::sync::Arc; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where @@ -86,7 +85,7 @@ pub(crate) fn multi_phase_events() -> Vec> { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::MultiPhase(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::MultiPhase(inner) = e { Some(inner) } else { None }) .collect::>() } @@ -99,17 +98,6 @@ pub fn roll_to(n: BlockNumber) { } } -pub fn roll_to_unsigned() { - while !matches!(MultiPhase::current_phase(), Phase::Unsigned(_)) { - roll_to(System::block_number() + 1); - } -} -pub fn roll_to_signed() { - while !matches!(MultiPhase::current_phase(), Phase::Signed) { - roll_to(System::block_number() + 1); - } -} - pub fn roll_to_with_ocw(n: BlockNumber) { let now = System::block_number(); for i in now + 1..=n { @@ -155,7 +143,7 @@ pub fn trim_helpers() -> TrimHelpers { 
seq_phragmen(desired_targets as usize, targets.clone(), voters.clone(), None).unwrap(); // sort by decreasing order of stake - assignments.sort_by_key(|assignment| { + assignments.sort_unstable_by_key(|assignment| { std::cmp::Reverse(stakes.get(&assignment.who).cloned().unwrap_or_default()) }); @@ -210,16 +198,16 @@ pub fn witness() -> SolutionOrSnapshotSize { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -238,15 +226,12 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults( - Weight::from_parts(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), - NORMAL_DISPATCH_RATIO, - ); + ::with_sensible_defaults(2 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); } impl pallet_balances::Config for Runtime { type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -256,9 +241,8 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); } -#[derive(Default, Eq, PartialEq, Debug, Clone, Copy)] +#[derive(Eq, PartialEq, Debug, Clone, Copy)] pub enum MockedWeightInfo { - #[default] Basic, Complex, Real, @@ -297,7 +281,6 @@ parameter_types! 
{ pub static MockWeightInfo: MockedWeightInfo = MockedWeightInfo::Real; pub static MaxElectingVoters: VoterIndex = u32::max_value(); pub static MaxElectableTargets: TargetIndex = TargetIndex::max_value(); - pub static MaxWinners: u32 = 200; pub static EpochLength: u64 = 30; pub static OnChainFallback: bool = true; @@ -309,30 +292,33 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen, Balancing>; type DataProvider = StakingMock; type WeightInfo = (); - type MaxWinners = MaxWinners; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; } pub struct MockFallback; -impl ElectionProviderBase for MockFallback { +impl ElectionProvider for MockFallback { type AccountId = AccountId; type BlockNumber = u64; type Error = &'static str; type DataProvider = StakingMock; - type MaxWinners = MaxWinners; + + fn elect() -> Result, Self::Error> { + Self::elect_with_bounds(Bounded::max_value(), Bounded::max_value()) + } } impl InstantElectionProvider for MockFallback { - fn instant_elect( - max_voters: Option, - max_targets: Option, - ) -> Result, Self::Error> { + fn elect_with_bounds( + max_voters: usize, + max_targets: usize, + ) -> Result, Self::Error> { if OnChainFallback::get() { - onchain::OnChainExecution::::instant_elect(max_voters, max_targets) - .map_err(|_| "onchain::OnChainExecution failed.") + onchain::UnboundedExecution::::elect_with_bounds( + max_voters, + max_targets, + ) + .map_err(|_| "onchain::UnboundedExecution failed.") } else { - Err("NoFallback.") + super::NoFallback::::elect_with_bounds(max_voters, max_targets) } } } @@ -363,11 +349,9 @@ impl MinerConfig for Runtime { fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { match MockWeightInfo::get() { - MockedWeightInfo::Basic => Weight::from_ref_time( - (10 as u64).saturating_add((5 as u64).saturating_mul(a as u64)), - ), - MockedWeightInfo::Complex => - Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64), + 
MockedWeightInfo::Basic => + (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)), + MockedWeightInfo::Complex => (0 * v + 0 * t + 1000 * a + 0 * d) as Weight, MockedWeightInfo::Real => <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d), } @@ -375,7 +359,7 @@ impl MinerConfig for Runtime { } impl crate::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type EstimateCallFee = frame_support::traits::ConstU32<8>; type SignedPhase = SignedPhase; @@ -397,30 +381,26 @@ impl crate::Config for Runtime { type WeightInfo = (); type BenchmarkingConfig = TestBenchmarkingConfig; type Fallback = MockFallback; - type GovernanceFallback = - frame_election_provider_support::onchain::OnChainExecution; + type GovernanceFallback = UnboundedExecution; type ForceOrigin = frame_system::EnsureRoot; type MaxElectingVoters = MaxElectingVoters; type MaxElectableTargets = MaxElectableTargets; - type MaxWinners = MaxWinners; type MinerConfig = Self; type Solver = SequentialPhragmen, Balancing>; } impl frame_system::offchain::SendTransactionTypes for Runtime where - RuntimeCall: From, + Call: From, { - type OverarchingCall = RuntimeCall; + type OverarchingCall = Call; type Extrinsic = Extrinsic; } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::testing::TestXt; parameter_types! 
{ pub MaxNominations: u32 = ::LIMIT as u32; - // only used in testing to manipulate mock behaviour - pub static DataProviderAllowBadData: bool = false; } #[derive(Default)] @@ -435,9 +415,7 @@ impl ElectionDataProvider for StakingMock { fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { let targets = Targets::get(); - if !DataProviderAllowBadData::get() && - maybe_max_len.map_or(false, |max_len| targets.len() > max_len) - { + if maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { return Err("Targets too big") } @@ -448,10 +426,8 @@ impl ElectionDataProvider for StakingMock { maybe_max_len: Option, ) -> data_provider::Result>> { let mut voters = Voters::get(); - if !DataProviderAllowBadData::get() { - if let Some(max_len) = maybe_max_len { - voters.truncate(max_len) - } + if let Some(max_len) = maybe_max_len { + voters.truncate(max_len) } Ok(voters) @@ -572,12 +548,6 @@ impl ExtBuilder { balances: vec![ // bunch of account for submitting stuff only. (99, 100), - (100, 100), - (101, 100), - (102, 100), - (103, 100), - (104, 100), - (105, 100), (999, 100), (9999, 100), ], diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 9d629ad77fd79..eca75139f925a 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -24,11 +24,11 @@ use crate::{ }; use codec::{Decode, Encode, HasCompact}; use frame_election_provider_support::NposSolution; -use frame_support::traits::{ - defensive_prelude::*, Currency, Get, OnUnbalanced, ReservableCurrency, +use frame_support::{ + storage::bounded_btree_map::BoundedBTreeMap, + traits::{defensive_prelude::*, Currency, Get, OnUnbalanced, ReservableCurrency}, }; use sp_arithmetic::traits::SaturatedConversion; -use sp_core::bounded::BoundedVec; use sp_npos_elections::ElectionScore; use sp_runtime::{ traits::{Saturating, Zero}, @@ -37,6 +37,7 @@ use sp_runtime::{ use sp_std::{ 
cmp::Ordering, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + ops::Deref, vec::Vec, }; @@ -98,12 +99,8 @@ pub type SignedSubmissionOf = SignedSubmission< <::MinerConfig as MinerConfig>::Solution, >; -/// Always sorted vector of a score, submitted at the given block number, which can be found at the -/// given index (`u32`) of the `SignedSubmissionsMap`. -pub type SubmissionIndicesOf = BoundedVec< - (ElectionScore, ::BlockNumber, u32), - ::SignedMaxSubmissions, ->; +pub type SubmissionIndicesOf = + BoundedBTreeMap::SignedMaxSubmissions>; /// Outcome of [`SignedSubmissions::insert`]. pub enum InsertResult { @@ -129,16 +126,6 @@ pub struct SignedSubmissions { } impl SignedSubmissions { - /// `true` if the structure is empty. - pub fn is_empty(&self) -> bool { - self.indices.is_empty() - } - - /// Get the length of submitted solutions. - pub fn len(&self) -> usize { - self.indices.len() - } - /// Get the signed submissions from storage. pub fn get() -> Self { let submissions = SignedSubmissions { @@ -147,12 +134,10 @@ impl SignedSubmissions { insertion_overlay: BTreeMap::new(), deletion_overlay: BTreeSet::new(), }; - // validate that the stored state is sane debug_assert!(submissions .indices - .iter() - .map(|(_, _, index)| index) + .values() .copied() .max() .map_or(true, |max_idx| submissions.next_idx > max_idx,)); @@ -170,8 +155,7 @@ impl SignedSubmissions { .map_or(true, |max_idx| self.next_idx > max_idx,)); debug_assert!(self .indices - .iter() - .map(|(_, _, index)| index) + .values() .copied() .max() .map_or(true, |max_idx| self.next_idx > max_idx,)); @@ -190,9 +174,9 @@ impl SignedSubmissions { /// Get the submission at a particular index. fn get_submission(&self, index: u32) -> Option> { if self.deletion_overlay.contains(&index) { - // Note: can't actually remove the item from the insertion overlay (if present) because - // we don't want to use `&mut self` here. There may be some kind of `RefCell` - // optimization possible here in the future. 
+ // Note: can't actually remove the item from the insertion overlay (if present) + // because we don't want to use `&mut self` here. There may be some kind of + // `RefCell` optimization possible here in the future. None } else { self.insertion_overlay @@ -204,30 +188,27 @@ impl SignedSubmissions { /// Perform three operations: /// - /// - Remove the solution at the given position of `self.indices`. - /// - Insert a new submission (identified by score and insertion index), if provided. - /// - Return the submission which was removed, if any. + /// - Remove a submission (identified by score) + /// - Insert a new submission (identified by score and insertion index) + /// - Return the submission which was removed. + /// + /// Note: in the case that `weakest_score` is not present in `self.indices`, this will return + /// `None` without inserting the new submission and without further notice. /// - /// The call site must ensure that `remove_pos` is a valid index. If otherwise, `None` is - /// silently returned. + /// Note: this does not enforce any ordering relation between the submission removed and that + /// inserted. /// /// Note: this doesn't insert into `insertion_overlay`, the optional new insertion must be - /// inserted into `insertion_overlay` to keep the variable `self` in a valid state. + /// inserted into `insertion_overlay` to keep the variable `self` in a valid state. fn swap_out_submission( &mut self, - remove_pos: usize, - insert: Option<(ElectionScore, T::BlockNumber, u32)>, + remove_score: ElectionScore, + insert: Option<(ElectionScore, u32)>, ) -> Option> { - if remove_pos >= self.indices.len() { - return None - } - - // safe: index was just checked in the line above. 
- let (_, _, remove_index) = self.indices.remove(remove_pos); - - if let Some((insert_score, block_number, insert_idx)) = insert { + let remove_index = self.indices.remove(&remove_score)?; + if let Some((insert_score, insert_idx)) = insert { self.indices - .try_push((insert_score, block_number, insert_idx)) + .try_insert(insert_score, insert_idx) .expect("just removed an item, we must be under capacity; qed"); } @@ -241,17 +222,20 @@ impl SignedSubmissions { }) } - /// Remove the signed submission with the highest score from the set. - pub fn pop_last(&mut self) -> Option> { - let best_index = self.indices.len().checked_sub(1)?; - self.swap_out_submission(best_index, None) - } - /// Iterate through the set of signed submissions in order of increasing score. pub fn iter(&self) -> impl '_ + Iterator> { - self.indices - .iter() - .filter_map(move |(_score, _bn, idx)| self.get_submission(*idx).defensive()) + self.indices.iter().filter_map(move |(_score, &idx)| { + let maybe_submission = self.get_submission(idx); + if maybe_submission.is_none() { + log!( + error, + "SignedSubmissions internal state is invalid (idx {}); \ + there is a logic error in code handling signed solution submissions", + idx, + ) + } + maybe_submission + }) } /// Empty the set of signed submissions, returning an iterator of signed submissions in @@ -299,54 +283,68 @@ impl SignedSubmissions { /// to `is_score_better`, we do not change anything. pub fn insert(&mut self, submission: SignedSubmissionOf) -> InsertResult { // verify the expectation that we never reuse an index - debug_assert!(!self.indices.iter().map(|(_, _, x)| x).any(|&idx| idx == self.next_idx)); - let block_number = frame_system::Pallet::::block_number(); - - let maybe_weakest = match self.indices.try_push(( - submission.raw_solution.score, - block_number, - self.next_idx, - )) { - Ok(_) => None, - Err(_) => { - // the queue is full -- if this is better, insert it. 
- let weakest_score = match self.indices.iter().next().defensive() { + debug_assert!(!self.indices.values().any(|&idx| idx == self.next_idx)); + + let weakest = match self.indices.try_insert(submission.raw_solution.score, self.next_idx) { + Ok(Some(prev_idx)) => { + // a submission of equal score was already present in the set; + // no point editing the actual backing map as we know that the newer solution can't + // be better than the old. However, we do need to put the old value back. + self.indices + .try_insert(submission.raw_solution.score, prev_idx) + .expect("didn't change the map size; qed"); + return InsertResult::NotInserted + }, + Ok(None) => { + // successfully inserted into the set; no need to take out weakest member + None + }, + Err((insert_score, insert_idx)) => { + // could not insert into the set because it is full. + // note that we short-circuit return here in case the iteration produces `None`. + // If there wasn't a weakest entry to remove, then there must be a capacity of 0, + // which means that we can't meaningfully proceed. + let weakest_score = match self.indices.iter().next() { None => return InsertResult::NotInserted, - Some((score, _, _)) => *score, + Some((score, _)) => *score, }; let threshold = T::BetterSignedThreshold::get(); // if we haven't improved on the weakest score, don't change anything. - if !submission.raw_solution.score.strict_threshold_better(weakest_score, threshold) - { + if !insert_score.strict_threshold_better(weakest_score, threshold) { return InsertResult::NotInserted } - self.swap_out_submission( - 0, // swap out the worse one, which is always index 0. - Some((submission.raw_solution.score, block_number, self.next_idx)), - ) + self.swap_out_submission(weakest_score, Some((insert_score, insert_idx))) }, }; - // this is the ONLY place that we insert, and we sort post insertion. If scores are the - // same, we sort based on reverse of submission block number. 
- self.indices - .sort_by(|(score1, bn1, _), (score2, bn2, _)| match score1.cmp(score2) { - Ordering::Equal => bn1.cmp(&bn2).reverse(), - x => x, - }); - // we've taken out the weakest, so update the storage map and the next index debug_assert!(!self.insertion_overlay.contains_key(&self.next_idx)); self.insertion_overlay.insert(self.next_idx, submission); debug_assert!(!self.deletion_overlay.contains(&self.next_idx)); self.next_idx += 1; - match maybe_weakest { + match weakest { Some(weakest) => InsertResult::InsertedEjecting(weakest), None => InsertResult::Inserted, } } + + /// Remove the signed submission with the highest score from the set. + pub fn pop_last(&mut self) -> Option> { + let (score, _) = self.indices.iter().rev().next()?; + // deref in advance to prevent mutable-immutable borrow conflict + let score = *score; + self.swap_out_submission(score, None) + } +} + +impl Deref for SignedSubmissions { + type Target = SubmissionIndicesOf; + + fn deref(&self) -> &Self::Target { + &self.indices + } } impl Pallet { @@ -381,12 +379,6 @@ impl Pallet { Self::snapshot_metadata().unwrap_or_default(); while let Some(best) = all_submissions.pop_last() { - log!( - debug, - "finalized_signed: trying to verify from {:?} score {:?}", - best.who, - best.raw_solution.score - ); let SignedSubmission { raw_solution, who, deposit, call_fee } = best; let active_voters = raw_solution.solution.voter_count() as u32; let feasibility_weight = { @@ -394,7 +386,6 @@ impl Pallet { let desired_targets = Self::desired_targets().defensive_unwrap_or_default(); T::WeightInfo::feasibility_check(voters, targets, active_voters, desired_targets) }; - // the feasibility check itself has some weight weight = weight.saturating_add(feasibility_weight); match Self::feasibility_check(raw_solution, ElectionCompute::Signed) { @@ -406,14 +397,12 @@ impl Pallet { call_fee, ); found_solution = true; - log!(debug, "finalized_signed: found a valid solution"); weight = weight 
.saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution()); break }, Err(_) => { - log!(warn, "finalized_signed: invalid signed submission found, slashing."); Self::finalize_signed_phase_reject_solution(&who, deposit); weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_reject_solution()); @@ -462,7 +451,7 @@ impl Pallet { /// /// Infallible pub fn finalize_signed_phase_accept_solution( - ready_solution: ReadySolution, + ready_solution: ReadySolution, who: &T::AccountId, deposit: BalanceOf, call_fee: BalanceOf, @@ -525,8 +514,8 @@ impl Pallet { let feasibility_weight = Self::solution_weight_of(raw_solution, size); let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); - let weight_deposit = T::SignedDepositWeight::get() - .saturating_mul(feasibility_weight.ref_time().saturated_into()); + let weight_deposit = + T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into()); T::SignedDepositBase::get() .saturating_add(len_deposit) @@ -537,7 +526,13 @@ impl Pallet { #[cfg(test)] mod tests { use super::*; - use crate::{mock::*, ElectionCompute, ElectionError, Error, Event, Perbill, Phase}; + use crate::{ + mock::{ + balances, raw_solution, roll_to, Balances, ExtBuilder, MockedWeightInfo, MultiPhase, + Origin, Runtime, SignedMaxRefunds, SignedMaxSubmissions, SignedMaxWeight, + }, + Error, Perbill, Phase, + }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; #[test] @@ -551,110 +546,49 @@ mod tests { let solution = raw_solution(); assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(10), Box::new(solution)), + MultiPhase::submit(Origin::signed(10), Box::new(solution)), Error::::PreDispatchEarlySubmission, ); }) } - #[test] - fn data_provider_should_respect_target_limits() { - ExtBuilder::default().build_and_execute(|| { - // given a reduced expectation of maximum electable targets - MaxElectableTargets::set(2); - // and a data provider that does not respect limits - 
DataProviderAllowBadData::set(true); - - assert_noop!( - MultiPhase::create_snapshot(), - ElectionError::DataProvider("Snapshot too big for submission."), - ); - }) - } - - #[test] - fn data_provider_should_respect_voter_limits() { - ExtBuilder::default().build_and_execute(|| { - // given a reduced expectation of maximum electing voters - MaxElectingVoters::set(2); - // and a data provider that does not respect limits - DataProviderAllowBadData::set(true); - - assert_noop!( - MultiPhase::create_snapshot(), - ElectionError::DataProvider("Snapshot too big for submission."), - ); - }) - } - - #[test] - fn desired_targets_greater_than_max_winners() { - ExtBuilder::default().build_and_execute(|| { - // given desired_targets bigger than MaxWinners - DesiredTargets::set(4); - MaxWinners::set(3); - - let (_, _, actual_desired_targets) = MultiPhase::create_snapshot_external().unwrap(); - - // snapshot is created with min of desired_targets and MaxWinners - assert_eq!(actual_desired_targets, 3); - }) - } - #[test] fn should_pay_deposit() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); assert_eq!(balances(&99), (100, 0)); - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false } - ] - ); }) } #[test] fn good_solution_is_rewarded() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); assert_eq!(balances(&99), (100, 0)); - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), 
Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); assert!(MultiPhase::finalize_signed_phase()); assert_eq!(balances(&99), (100 + 7 + 8, 0)); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Rewarded { account: 99, value: 7 } - ] - ); }) } #[test] fn bad_solution_is_slashed() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -663,29 +597,20 @@ mod tests { // make the solution invalid. solution.score.minimal_stake += 1; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); // no good solution was stored. assert!(!MultiPhase::finalize_signed_phase()); // and the bond is gone. assert_eq!(balances(&99), (95, 0)); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Slashed { account: 99, value: 5 } - ] - ); }) } #[test] fn suppressed_solution_gets_bond_back() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -693,11 +618,11 @@ mod tests { assert_eq!(balances(&999), (100, 0)); // submit as correct. - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution.clone()))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution.clone()))); // make the solution invalid and weaker. 
solution.score.minimal_stake -= 1; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); assert_eq!(balances(&999), (95, 5)); @@ -708,22 +633,13 @@ mod tests { assert_eq!(balances(&99), (100 + 7 + 8, 0)); // 999 gets everything back, including the call fee. assert_eq!(balances(&999), (100 + 8, 0)); - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Rewarded { account: 99, value: 7 } - ] - ); }) } #[test] fn cannot_submit_worse_with_full_queue() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -732,7 +648,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); } // weaker. 
@@ -742,7 +658,7 @@ mod tests { }; assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution)), + MultiPhase::submit(Origin::signed(99), Box::new(solution)), Error::::SignedQueueFull, ); }) @@ -751,7 +667,7 @@ mod tests { #[test] fn call_fee_refund_is_limited_by_signed_max_refunds() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(SignedMaxRefunds::get(), 1); assert!(SignedMaxSubmissions::get() > 2); @@ -763,11 +679,11 @@ mod tests { let mut solution = raw_solution(); solution.score.minimal_stake -= s as u128; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(account), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(account), Box::new(solution))); assert_eq!(balances(&account), (95, 5)); } - assert_ok!(MultiPhase::do_elect()); + assert!(MultiPhase::finalize_signed_phase()); for s in 0..SignedMaxSubmissions::get() { let account = 99 + s as u64; @@ -783,26 +699,6 @@ mod tests { assert_eq!(balances(&account), (100, 0)); } } - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Rewarded { account: 99, value: 7 }, - Event::ElectionFinalized { - compute: ElectionCompute::Signed, - score: ElectionScore { - minimal_stake: 40, - sum_stake: 100, - sum_stake_squared: 5200 - } - } - ] - ); }); } @@ -812,7 +708,7 @@ mod tests { .signed_max_submission(1) .better_signed_threshold(Perbill::from_percent(20)) .build_and_execute(|| { - roll_to_signed(); + roll_to(15); 
assert!(MultiPhase::current_phase().is_signed()); let mut solution = RawSolution { @@ -823,7 +719,7 @@ mod tests { }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); // This is 10% better, so does not meet the 20% threshold and is therefore rejected. solution = RawSolution { @@ -836,7 +732,7 @@ mod tests { }; assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution)), + MultiPhase::submit(Origin::signed(99), Box::new(solution)), Error::::SignedQueueFull, ); @@ -850,28 +746,14 @@ mod tests { ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { - compute: ElectionCompute::Signed, - prev_ejected: false - }, - Event::SolutionStored { - compute: ElectionCompute::Signed, - prev_ejected: true - } - ] - ); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); }) } #[test] fn weakest_is_removed_if_better_provided() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -882,7 +764,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(account), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(account), Box::new(solution))); assert_eq!(balances(&account), (95, 5)); } @@ -899,7 +781,7 @@ mod tests { score: ElectionScore { minimal_stake: 20, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution))); // the one with score 5 was rejected, the new 
one inserted. assert_eq!( @@ -916,9 +798,9 @@ mod tests { } #[test] - fn replace_weakest_by_score_works() { - ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to_signed(); + fn replace_weakest_works() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); for s in 1..SignedMaxSubmissions::get() { @@ -927,21 +809,21 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); } let solution = RawSolution { score: ElectionScore { minimal_stake: 4, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); assert_eq!( MultiPhase::signed_submissions() .iter() .map(|s| s.raw_solution.score.minimal_stake) .collect::>(), - vec![4, 6, 7], + vec![4, 6, 7, 8, 9], ); // better. @@ -949,7 +831,7 @@ mod tests { score: ElectionScore { minimal_stake: 5, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); // the one with score 5 was rejected, the new one inserted. 
assert_eq!( @@ -957,7 +839,7 @@ mod tests { .iter() .map(|s| s.raw_solution.score.minimal_stake) .collect::>(), - vec![5, 6, 7], + vec![5, 6, 7, 8, 9], ); }) } @@ -965,7 +847,7 @@ mod tests { #[test] fn early_ejected_solution_gets_bond_back() { ExtBuilder::default().signed_deposit(2, 0, 0).build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -974,7 +856,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); } assert_eq!(balances(&99).1, 2 * 5); @@ -985,7 +867,7 @@ mod tests { score: ElectionScore { minimal_stake: 20, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution))); // got one bond back. assert_eq!(balances(&99).1, 2 * 4); @@ -994,10 +876,9 @@ mod tests { } #[test] - fn equally_good_solution_is_not_accepted_when_queue_full() { - // because in ordering of solutions, an older solution has higher priority and should stay. 
+ fn equally_good_solution_is_not_accepted() { ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); for i in 0..SignedMaxSubmissions::get() { @@ -1005,9 +886,8 @@ mod tests { score: ElectionScore { minimal_stake: (5 + i).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); } - assert_eq!( MultiPhase::signed_submissions() .iter() @@ -1022,105 +902,12 @@ mod tests { ..Default::default() }; assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution)), + MultiPhase::submit(Origin::signed(99), Box::new(solution)), Error::::SignedQueueFull, ); }) } - #[test] - fn equally_good_solution_is_accepted_when_queue_not_full() { - // because in ordering of solutions, an older solution has higher priority and should stay. - ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to(15); - assert!(MultiPhase::current_phase().is_signed()); - - let solution = RawSolution { - score: ElectionScore { minimal_stake: 5, ..Default::default() }, - ..Default::default() - }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); - - assert_eq!( - MultiPhase::signed_submissions() - .iter() - .map(|s| (s.who, s.raw_solution.score.minimal_stake,)) - .collect::>(), - vec![(99, 5)] - ); - - roll_to(16); - let solution = RawSolution { - score: ElectionScore { minimal_stake: 5, ..Default::default() }, - ..Default::default() - }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); - - assert_eq!( - MultiPhase::signed_submissions() - .iter() - .map(|s| (s.who, s.raw_solution.score.minimal_stake,)) - .collect::>(), - vec![(999, 5), (99, 5)] - ); - - let solution = RawSolution { - score: ElectionScore { minimal_stake: 6, ..Default::default() }, - 
..Default::default() - }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(9999), Box::new(solution))); - - assert_eq!( - MultiPhase::signed_submissions() - .iter() - .map(|s| (s.who, s.raw_solution.score.minimal_stake,)) - .collect::>(), - vec![(999, 5), (99, 5), (9999, 6)] - ); - }) - } - - #[test] - fn all_equal_score() { - // because in ordering of solutions, an older solution has higher priority and should stay. - ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to(15); - assert!(MultiPhase::current_phase().is_signed()); - - for i in 0..SignedMaxSubmissions::get() { - roll_to((15 + i).into()); - let solution = raw_solution(); - assert_ok!(MultiPhase::submit( - RuntimeOrigin::signed(100 + i as AccountId), - Box::new(solution) - )); - } - - assert_eq!( - MultiPhase::signed_submissions() - .iter() - .map(|s| (s.who, s.raw_solution.score.minimal_stake)) - .collect::>(), - vec![(102, 40), (101, 40), (100, 40)] - ); - - roll_to(25); - - // The first one that will actually get verified is the last one. - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Rewarded { account: 100, value: 7 }, - Event::UnsignedPhaseStarted { round: 1 } - ] - ); - }) - } - #[test] fn all_in_one_signed_submission_scenario() { // a combination of: @@ -1128,28 +915,27 @@ mod tests { // - bad_solution_is_slashed // - suppressed_solution_gets_bond_back ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(balances(&99), (100, 0)); assert_eq!(balances(&999), (100, 0)); assert_eq!(balances(&9999), (100, 0)); - let solution = raw_solution(); // submit a correct one. 
- assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution.clone()))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution.clone()))); // make the solution invalidly better and submit. This ought to be slashed. let mut solution_999 = solution.clone(); solution_999.score.minimal_stake += 1; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution_999))); + assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution_999))); // make the solution invalidly worse and submit. This ought to be suppressed and // returned. let mut solution_9999 = solution.clone(); solution_9999.score.minimal_stake -= 1; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(9999), Box::new(solution_9999))); + assert_ok!(MultiPhase::submit(Origin::signed(9999), Box::new(solution_9999))); assert_eq!( MultiPhase::signed_submissions().iter().map(|x| x.who).collect::>(), @@ -1165,27 +951,16 @@ mod tests { assert_eq!(balances(&999), (95, 0)); // 9999 gets everything back, including the call fee. 
assert_eq!(balances(&9999), (100 + 8, 0)); - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Slashed { account: 999, value: 5 }, - Event::Rewarded { account: 99, value: 7 } - ] - ); }) } #[test] fn cannot_consume_too_much_future_weight() { ExtBuilder::default() - .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) + .signed_weight(40) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -1196,21 +971,18 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, Weight::from_ref_time(35)); + assert_eq!(solution_weight, 35); assert_eq!(raw.solution.voter_count(), 5); - assert_eq!( - ::SignedMaxWeight::get(), - Weight::from_ref_time(40).set_proof_size(u64::MAX) - ); + assert_eq!(::SignedMaxWeight::get(), 40); - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw.clone()))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(raw.clone()))); - ::set(Weight::from_ref_time(30).set_proof_size(u64::MAX)); + ::set(30); // note: resubmitting the same solution is technically okay as long as the queue has // space. 
assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw)), + MultiPhase::submit(Origin::signed(99), Box::new(raw)), Error::::SignedTooMuchWeight, ); }) @@ -1219,14 +991,14 @@ mod tests { #[test] fn insufficient_deposit_does_not_store_submission() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); assert_eq!(balances(&123), (0, 0)); assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(123), Box::new(solution)), + MultiPhase::submit(Origin::signed(123), Box::new(solution)), Error::::SignedCannotPayDeposit, ); @@ -1239,7 +1011,7 @@ mod tests { #[test] fn insufficient_deposit_with_full_queue_works_properly() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -1248,7 +1020,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); } // this solution has a higher score than any in the queue @@ -1262,7 +1034,7 @@ mod tests { assert_eq!(balances(&123), (0, 0)); assert_noop!( - MultiPhase::submit(RuntimeOrigin::signed(123), Box::new(solution)), + MultiPhase::submit(Origin::signed(123), Box::new(solution)), Error::::SignedCannotPayDeposit, ); @@ -1285,28 +1057,19 @@ mod tests { #[test] fn finalize_signed_phase_is_idempotent_given_submissions() { ExtBuilder::default().build_and_execute(|| { - roll_to_signed(); + roll_to(15); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); // submit a correct one. 
- assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); // _some_ good solution was stored. assert!(MultiPhase::finalize_signed_phase()); // calling it again doesn't change anything assert_storage_noop!(MultiPhase::finalize_signed_phase()); - - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, - Event::Rewarded { account: 99, value: 7 } - ] - ); }) } } diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 7340605dfe621..de25355f0ca5b 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -34,7 +34,7 @@ use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, DispatchError, SaturatedConversion, }; -use sp_std::prelude::*; +use sp_std::{cmp::Ordering, prelude::*}; /// Storage key used to store the last block number at which offchain worker ran. 
pub(crate) const OFFCHAIN_LAST_BLOCK: &[u8] = b"parity/multi-phase-unsigned-election"; @@ -638,17 +638,16 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - if current_weight.all_lt(max_weight) { - let next_voters = voters.checked_add(step); - match next_voters { - Some(voters) if voters < max_voters => Ok(voters), - _ => Err(()), - } - } else if current_weight.any_gt(max_weight) { - voters.checked_sub(step).ok_or(()) - } else { - // If any of the constituent weights is equal to the max weight, we're at max - Ok(voters) + match current_weight.cmp(&max_weight) { + Ordering::Less => { + let next_voters = voters.checked_add(step); + match next_voters { + Some(voters) if voters < max_voters => Ok(voters), + _ => Err(()), + } + }, + Ordering::Greater => voters.checked_sub(step).ok_or(()), + Ordering::Equal => Ok(voters), } }; @@ -673,16 +672,16 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. 
- while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { + while voters < max_voters && weight_with(voters + 1) < max_weight { voters += 1; } - while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { + while voters.checked_sub(1).is_some() && weight_with(voters) > max_weight { voters -= 1; } let final_decision = voters.min(size.voters); debug_assert!( - weight_with(final_decision).all_lte(max_weight), + weight_with(final_decision) <= max_weight, "weight_with({}) <= {}", final_decision, max_weight, @@ -700,348 +699,54 @@ mod max_weight { fn find_max_voter_binary_search_works() { let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; MockWeightInfo::set(crate::mock::MockedWeightInfo::Complex); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::zero().set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(999).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1000).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1001).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1990).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1999).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2000).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2001).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - 
Weight::from_ref_time(2010).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2990).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2999).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(3000).set_proof_size(u64::MAX) - ), - 3 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(3333).set_proof_size(u64::MAX) - ), - 3 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(5500).set_proof_size(u64::MAX) - ), - 5 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(7777).set_proof_size(u64::MAX) - ), - 7 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(9999).set_proof_size(u64::MAX) - ), - 9 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(10_000).set_proof_size(u64::MAX) - ), - 10 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(10_999).set_proof_size(u64::MAX) - ), - 10 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(11_000).set_proof_size(u64::MAX) - ), - 10 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(22_000).set_proof_size(u64::MAX) - ), - 10 - ); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1990), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, 
w, 2000), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2990), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2999), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3000), 3); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 3); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 5500), 5); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 7777), 7); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 9999), 9); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 10_000), 10); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 10_999), 10); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 11_000), 10); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 22_000), 10); let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(0).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(999).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1000).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1001).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1990).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1999).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2000).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( 
- 0, - w, - Weight::from_ref_time(2001).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2010).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(3333).set_proof_size(u64::MAX) - ), - 1 - ); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1990), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 1); let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(0).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(999).set_proof_size(u64::MAX) - ), - 0 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1000).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1001).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(1999).set_proof_size(u64::MAX) - ), - 1 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2000).set_proof_size(u64::MAX) - ), - 2 - ); - 
assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2001).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(2010).set_proof_size(u64::MAX) - ), - 2 - ); - assert_eq!( - Miner::::maximum_voter_for_weight( - 0, - w, - Weight::from_ref_time(3333).set_proof_size(u64::MAX) - ), - 2 - ); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 2); } } @@ -1050,12 +755,11 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, roll_to, roll_to_signed, roll_to_unsigned, roll_to_with_ocw, - trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, - Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, - UnsignedPhase, + roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, Call as OuterCall, + ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, Origin, Runtime, System, + TestNposSolution, TrimHelpers, UnsignedPhase, }, - CurrentPhase, Event, InvalidTransaction, Phase, QueuedSolution, TransactionSource, + CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, TransactionValidityError, }; use codec::Decode; @@ -1101,7 +805,7 @@ mod tests { )); // signed - roll_to_signed(); + roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert!(matches!( ::validate_unsigned( @@ -1117,7 +821,7 @@ mod tests { )); 
// unsigned - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); assert!(::validate_unsigned( @@ -1148,7 +852,7 @@ mod tests { #[test] fn validate_unsigned_retracts_low_score() { ExtBuilder::default().desired_targets(0).build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1194,7 +898,7 @@ mod tests { #[test] fn validate_unsigned_retracts_incorrect_winner_count() { ExtBuilder::default().desired_targets(1).build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); let raw = RawSolution:: { @@ -1223,7 +927,7 @@ mod tests { .miner_tx_priority(20) .desired_targets(0) .build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1254,7 +958,7 @@ mod tests { Some(\"PreDispatchWrongWinnerCount\") })")] fn unfeasible_solution_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // This is in itself an invalid BS solution. @@ -1266,8 +970,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: witness(), }; - let runtime_call: RuntimeCall = call.into(); - let _ = runtime_call.dispatch(RuntimeOrigin::none()); + let outer_call: OuterCall = call.into(); + let _ = outer_call.dispatch(Origin::none()); }) } @@ -1276,7 +980,7 @@ mod tests { deprive validator from their authoring reward.")] fn wrong_witness_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // This solution is unfeasible as well, but we won't even get there. 
@@ -1292,15 +996,15 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: correct_witness, }; - let runtime_call: RuntimeCall = call.into(); - let _ = runtime_call.dispatch(RuntimeOrigin::none()); + let outer_call: OuterCall = call.into(); + let _ = outer_call.dispatch(Origin::none()); }) } #[test] fn miner_works() { ExtBuilder::default().build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. @@ -1312,33 +1016,18 @@ mod tests { // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); - assert_ok!(MultiPhase::submit_unsigned( - RuntimeOrigin::none(), - Box::new(solution), - witness - )); + assert_ok!(MultiPhase::submit_unsigned(Origin::none(), Box::new(solution), witness)); assert!(MultiPhase::queued_solution().is_some()); - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::SolutionStored { - compute: ElectionCompute::Unsigned, - prev_ejected: false - } - ] - ); }) } #[test] fn miner_trims_weight() { ExtBuilder::default() - .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) + .miner_weight(100) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -1349,11 +1038,11 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, Weight::from_ref_time(35)); + assert_eq!(solution_weight, 35); assert_eq!(raw.solution.voter_count(), 5); // now reduce the max weight - ::set(Weight::from_ref_time(25).set_proof_size(u64::MAX)); + ::set(25); let (raw, witness) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( @@ -1363,7 +1052,7 @@ mod tests { 
raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, Weight::from_ref_time(25)); + assert_eq!(solution_weight, 25); assert_eq!(raw.solution.voter_count(), 3); }) } @@ -1372,7 +1061,7 @@ mod tests { fn miner_will_not_submit_if_not_enough_winners() { let (mut ext, _) = ExtBuilder::default().desired_targets(8).build_offchainify(0); ext.execute_with(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // Force the number of winners to be bigger to fail @@ -1398,7 +1087,7 @@ mod tests { .add_voter(8, 5, bounded_vec![10]) .better_unsigned_threshold(Perbill::from_percent(50)) .build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); assert_eq!(MultiPhase::desired_targets().unwrap(), 1); @@ -1426,7 +1115,7 @@ mod tests { let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); assert_ok!(MultiPhase::submit_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(solution), witness )); @@ -1487,7 +1176,7 @@ mod tests { // and it is fine assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); assert_ok!(MultiPhase::submit_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(solution), witness )); @@ -1500,7 +1189,7 @@ mod tests { ext.execute_with(|| { let offchain_repeat = ::OffchainRepeat::get(); - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // first execution -- okay. @@ -1541,7 +1230,7 @@ mod tests { let guard = StorageValueRef::persistent(&OFFCHAIN_LOCK); let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // initially, the lock is not set. @@ -1562,7 +1251,7 @@ mod tests { // ensure that if the guard is in hold, a new execution is not allowed. 
let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to_unsigned(); + roll_to(25); assert!(MultiPhase::current_phase().is_unsigned()); // artificially set the value, as if another thread is mid-way. @@ -1590,7 +1279,7 @@ mod tests { fn ocw_only_runs_when_unsigned_open_now() { let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to_unsigned(); + roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // we must clear the offchain storage to ensure the offchain execution check doesn't get @@ -1670,21 +1359,6 @@ mod tests { // the submitted solution changes because the cache was cleared. assert_eq!(tx_cache_1, tx_cache_3); - assert_eq!( - multi_phase_events(), - vec![ - Event::SignedPhaseStarted { round: 1 }, - Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - } - ] - ); }) } @@ -1782,7 +1456,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); let call = extrinsic.call; - assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); + assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1799,7 +1473,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); let call = match extrinsic.call { - RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, + OuterCall::MultiPhase(call @ Call::submit_unsigned { .. 
}) => call, _ => panic!("bad call: unexpected submission"), }; @@ -1824,7 +1498,7 @@ mod tests { #[test] fn trim_assignments_length_does_not_modify_when_short_enough() { ExtBuilder::default().build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); // given let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); @@ -1849,7 +1523,7 @@ mod tests { #[test] fn trim_assignments_length_modifies_when_too_long() { ExtBuilder::default().build().execute_with(|| { - roll_to_unsigned(); + roll_to(25); // given let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); @@ -1875,7 +1549,7 @@ mod tests { #[test] fn trim_assignments_length_trims_lowest_stake() { ExtBuilder::default().build().execute_with(|| { - roll_to_unsigned(); + roll_to(25); // given let TrimHelpers { voters, mut assignments, encoded_size_of, voter_index } = @@ -1938,7 +1612,7 @@ mod tests { // or when we trim it to zero. ExtBuilder::default().build_and_execute(|| { // we need snapshot for `trim_helpers` to work. - roll_to_unsigned(); + roll_to(25); let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); assert!(assignments.len() > 0); @@ -1960,7 +1634,7 @@ mod tests { #[test] fn mine_solution_solutions_always_within_acceptable_length() { ExtBuilder::default().build_and_execute(|| { - roll_to_unsigned(); + roll_to(25); // how long would the default solution be? let solution = MultiPhase::mine_solution().unwrap(); diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 221fd5837f7b7..68ce00dd0de32 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/election-provider-multi-phase/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/election-provider-multi-phase/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,52 +68,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - // Minimum execution time: 17_309 nanoseconds. - Weight::from_ref_time(17_646_000 as u64) - .saturating_add(T::DbWeight::get().reads(8 as u64)) + (13_495_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - // Minimum execution time: 17_992 nanoseconds. 
- Weight::from_ref_time(18_426_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (14_114_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - // Minimum execution time: 17_340 nanoseconds. - Weight::from_ref_time(17_881_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (13_756_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - // Minimum execution time: 35_571 nanoseconds. - Weight::from_ref_time(35_989_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (28_467_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - // Minimum execution time: 27_403 nanoseconds. - Weight::from_ref_time(27_879_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (21_991_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// The range of component `v` is `[1000, 2000]`. 
- /// The range of component `t` is `[500, 1000]`. - fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { - // Minimum execution time: 571_900 nanoseconds. - Weight::from_ref_time(589_170_000 as u64) - // Standard Error: 7_123 - .saturating_add(Weight::from_ref_time(384_767 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { + (3_186_000 as Weight) + // Standard Error: 1_000 + .saturating_add((202_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 3_000 + .saturating_add((60_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -127,17 +118,14 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) - /// The range of component `a` is `[500, 800]`. - /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { - // Minimum execution time: 1_296_481 nanoseconds. 
- Weight::from_ref_time(1_076_121_575 as u64) - // Standard Error: 5_708 - .saturating_add(Weight::from_ref_time(474_995 as u64).saturating_mul(a as u64)) - // Standard Error: 8_556 - .saturating_add(Weight::from_ref_time(39_224 as u64).saturating_mul(d as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(8 as u64)) + (137_653_000 as Weight) + // Standard Error: 4_000 + .saturating_add((640_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 6_000 + .saturating_add((48_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -146,10 +134,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - // Minimum execution time: 58_716 nanoseconds. - Weight::from_ref_time(59_480_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (49_313_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -158,36 +145,34 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// The range of component `v` is `[1000, 2000]`. - /// The range of component `t` is `[500, 1000]`. - /// The range of component `a` is `[500, 800]`. - /// The range of component `d` is `[200, 400]`. 
- fn submit_unsigned(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { - // Minimum execution time: 5_540_737 nanoseconds. - Weight::from_ref_time(5_567_381_000 as u64) - // Standard Error: 18_563 - .saturating_add(Weight::from_ref_time(603_280 as u64).saturating_mul(v as u64)) - // Standard Error: 55_009 - .saturating_add(Weight::from_ref_time(3_164_053 as u64).saturating_mul(a as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 3_000 + .saturating_add((867_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + .saturating_add((107_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 12_000 + .saturating_add((6_907_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 18_000 + .saturating_add((1_427_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// The range of component `v` is `[1000, 2000]`. - /// The range of component `t` is `[500, 1000]`. - /// The range of component `a` is `[500, 800]`. - /// The range of component `d` is `[200, 400]`. - fn feasibility_check(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { - // Minimum execution time: 5_021_808 nanoseconds. 
- Weight::from_ref_time(5_051_856_000 as u64) - // Standard Error: 16_650 - .saturating_add(Weight::from_ref_time(683_344 as u64).saturating_mul(v as u64)) - // Standard Error: 49_342 - .saturating_add(Weight::from_ref_time(2_190_098 as u64).saturating_mul(a as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((844_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 5_000 + .saturating_add((150_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 8_000 + .saturating_add((5_421_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 13_000 + .saturating_add((1_167_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) } } @@ -202,52 +187,46 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - // Minimum execution time: 17_309 nanoseconds. - Weight::from_ref_time(17_646_000 as u64) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) + (13_495_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - // Minimum execution time: 17_992 nanoseconds. - Weight::from_ref_time(18_426_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (14_114_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - // Minimum execution time: 17_340 nanoseconds. 
- Weight::from_ref_time(17_881_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (13_756_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - // Minimum execution time: 35_571 nanoseconds. - Weight::from_ref_time(35_989_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (28_467_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - // Minimum execution time: 27_403 nanoseconds. - Weight::from_ref_time(27_879_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (21_991_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// The range of component `v` is `[1000, 2000]`. - /// The range of component `t` is `[500, 1000]`. - fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { - // Minimum execution time: 571_900 nanoseconds. 
- Weight::from_ref_time(589_170_000 as u64) - // Standard Error: 7_123 - .saturating_add(Weight::from_ref_time(384_767 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { + (3_186_000 as Weight) + // Standard Error: 1_000 + .saturating_add((202_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 3_000 + .saturating_add((60_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -258,17 +237,14 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) - /// The range of component `a` is `[500, 800]`. - /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { - // Minimum execution time: 1_296_481 nanoseconds. 
- Weight::from_ref_time(1_076_121_575 as u64) - // Standard Error: 5_708 - .saturating_add(Weight::from_ref_time(474_995 as u64).saturating_mul(a as u64)) - // Standard Error: 8_556 - .saturating_add(Weight::from_ref_time(39_224 as u64).saturating_mul(d as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(8 as u64)) + (137_653_000 as Weight) + // Standard Error: 4_000 + .saturating_add((640_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 6_000 + .saturating_add((48_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -277,10 +253,9 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - // Minimum execution time: 58_716 nanoseconds. - Weight::from_ref_time(59_480_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (49_313_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -289,35 +264,33 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// The range of component `v` is `[1000, 2000]`. - /// The range of component `t` is `[500, 1000]`. - /// The range of component `a` is `[500, 800]`. - /// The range of component `d` is `[200, 400]`. 
- fn submit_unsigned(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { - // Minimum execution time: 5_540_737 nanoseconds. - Weight::from_ref_time(5_567_381_000 as u64) - // Standard Error: 18_563 - .saturating_add(Weight::from_ref_time(603_280 as u64).saturating_mul(v as u64)) - // Standard Error: 55_009 - .saturating_add(Weight::from_ref_time(3_164_053 as u64).saturating_mul(a as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 3_000 + .saturating_add((867_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + .saturating_add((107_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 12_000 + .saturating_add((6_907_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 18_000 + .saturating_add((1_427_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// The range of component `v` is `[1000, 2000]`. - /// The range of component `t` is `[500, 1000]`. - /// The range of component `a` is `[500, 800]`. - /// The range of component `d` is `[200, 400]`. - fn feasibility_check(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { - // Minimum execution time: 5_021_808 nanoseconds. 
- Weight::from_ref_time(5_051_856_000 as u64) - // Standard Error: 16_650 - .saturating_add(Weight::from_ref_time(683_344 as u64).saturating_mul(v as u64)) - // Standard Error: 49_342 - .saturating_add(Weight::from_ref_time(2_190_098 as u64).saturating_mul(a as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((844_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 5_000 + .saturating_add((150_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 8_000 + .saturating_add((5_421_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 13_000 + .saturating_add((1_167_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) } } diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index 5d064c770f8d9..67e1ea63cb655 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -31,7 +31,6 @@ sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elect [features] default = ["std"] -fuzz = ["default"] std = [ "codec/std", "frame-support/std", diff --git a/frame/election-provider-support/benchmarking/Cargo.toml b/frame/election-provider-support/benchmarking/Cargo.toml index 0f296d9a70ee0..00037d460db17 100644 --- a/frame/election-provider-support/benchmarking/Cargo.toml +++ b/frame/election-provider-support/benchmarking/Cargo.toml @@ -25,7 +25,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../../pri default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-election-provider-support/std", "frame-system/std", "sp-npos-elections/std", diff --git a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml 
index 2cc620452586d..10f82cd316851 100644 --- a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 38924a18e2f54..eee865d0b737b 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -20,11 +20,10 @@ //! This crate provides two traits that could interact to enable extensible election functionality //! within FRAME pallets. //! -//! Something that will provide the functionality of election will implement -//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an -//! associated [`ElectionProviderBase::DataProvider`], which needs to be -//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* -//! the receiver of the election, resulting in a diagram as below: +//! Something that will provide the functionality of election will implement [`ElectionProvider`], +//! whilst needing an associated [`ElectionProvider::DataProvider`], which needs to be fulfilled by +//! an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* the receiver +//! of the election, resulting in a diagram as below: //! //! ```ignore //! ElectionDataProvider @@ -82,7 +81,6 @@ //! # use frame_election_provider_support::{*, data_provider}; //! # use sp_npos_elections::{Support, Assignment}; //! # use frame_support::traits::ConstU32; -//! # use frame_support::bounded_vec; //! //! type AccountId = u64; //! type Balance = u64; @@ -133,21 +131,16 @@ //! type DataProvider: ElectionDataProvider; //! 
} //! -//! impl ElectionProviderBase for GenericElectionProvider { +//! impl ElectionProvider for GenericElectionProvider { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; //! type Error = &'static str; //! type DataProvider = T::DataProvider; -//! type MaxWinners = ConstU32<{ u32::MAX }>; -//! -//! } //! -//! impl ElectionProvider for GenericElectionProvider { -//! fn ongoing() -> bool { false } -//! fn elect() -> Result, Self::Error> { +//! fn elect() -> Result, Self::Error> { //! Self::DataProvider::electable_targets(None) //! .map_err(|_| "failed to elect") -//! .map(|t| bounded_vec![(t[0], Support::default())]) +//! .map(|t| vec![(t[0], Support::default())]) //! } //! } //! } @@ -184,8 +177,8 @@ pub use frame_support::{traits::Get, weights::Weight, BoundedVec, RuntimeDebug}; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance, - IdentifierT, PerThing128, Support, Supports, VoteWeight, + Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128, + Support, Supports, VoteWeight, }; pub use traits::NposSolution; @@ -356,8 +349,12 @@ pub trait ElectionDataProvider { fn clear() {} } -/// Base trait for types that can provide election -pub trait ElectionProviderBase { +/// Something that can compute the result of an election and pass it back to the caller. +/// +/// This trait only provides an interface to _request_ an election, i.e. +/// [`ElectionProvider::elect`]. That data required for the election need to be passed to the +/// implemented of this trait through [`ElectionProvider::DataProvider`]. +pub trait ElectionProvider { /// The account identifier type. type AccountId; @@ -367,109 +364,61 @@ pub trait ElectionProviderBase { /// The error type that is returned by the provider. 
type Error: Debug; - /// The upper bound on election winners that can be returned. - /// - /// # WARNING - /// - /// when communicating with the data provider, one must ensure that - /// `DataProvider::desired_targets` returns a value less than this bound. An - /// implementation can chose to either return an error and/or sort and - /// truncate the output to meet this bound. - type MaxWinners: Get; - /// The data provider of the election. type DataProvider: ElectionDataProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, >; - /// checked call to `Self::DataProvider::desired_targets()` ensuring the value never exceeds - /// [`Self::MaxWinners`]. - fn desired_targets_checked() -> data_provider::Result { - match Self::DataProvider::desired_targets() { - Ok(desired_targets) => - if desired_targets <= Self::MaxWinners::get() { - Ok(desired_targets) - } else { - Err("desired_targets should not be greater than MaxWinners") - }, - Err(e) => Err(e), - } - } + /// Elect a new set of winners, without specifying any bounds on the amount of data fetched from + /// [`Self::DataProvider`]. An implementation could nonetheless impose its own custom limits. + /// + /// The result is returned in a target major format, namely as *vector of supports*. + /// + /// This should be implemented as a self-weighing function. The implementor should register its + /// appropriate weight at the end of execution with the system pallet directly. + fn elect() -> Result, Self::Error>; } -/// Elect a new set of winners, bounded by `MaxWinners`. -/// -/// It must always use [`ElectionProviderBase::DataProvider`] to fetch the data it needs. +/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure an election needs to +/// happen instantly, not asynchronously. /// -/// This election provider that could function asynchronously. 
This implies that this election might -/// needs data ahead of time (ergo, receives no arguments to `elect`), and might be `ongoing` at -/// times. -pub trait ElectionProvider: ElectionProviderBase { - /// Indicate if this election provider is currently ongoing an asynchronous election or not. - fn ongoing() -> bool; - - /// Performs the election. This should be implemented as a self-weighing function. The - /// implementor should register its appropriate weight at the end of execution with the - /// system pallet directly. - fn elect() -> Result, Self::Error>; -} - -/// A (almost) marker trait that signifies an election provider as working synchronously. i.e. being -/// *instant*. +/// The same `DataProvider` is assumed to be used. /// -/// This must still use the same data provider as with [`ElectionProviderBase::DataProvider`]. -/// However, it can optionally overwrite the amount of voters and targets that are fetched from the -/// data provider at runtime via `forced_input_voters_bound` and `forced_input_target_bound`. -pub trait InstantElectionProvider: ElectionProviderBase { - fn instant_elect( - forced_input_voters_bound: Option, - forced_input_target_bound: Option, - ) -> Result, Self::Error>; +/// Consequently, allows for control over the amount of data that is being fetched from the +/// [`ElectionProvider::DataProvider`]. +pub trait InstantElectionProvider: ElectionProvider { + /// Elect a new set of winners, but unlike [`ElectionProvider::elect`] which cannot enforce + /// bounds, this trait method can enforce bounds on the amount of data provided by the + /// `DataProvider`. + /// + /// An implementing type, if itself bounded, should choose the minimum of the two bounds to + /// choose the final value of `max_voters` and `max_targets`. In other words, an implementation + /// should guarantee that `max_voter` and `max_targets` provided to this method are absolutely + /// respected. 
+ fn elect_with_bounds( + max_voters: usize, + max_targets: usize, + ) -> Result, Self::Error>; } -/// An election provider that does nothing whatsoever. +/// An election provider to be used only for testing. +#[cfg(feature = "std")] pub struct NoElection(sp_std::marker::PhantomData); -impl ElectionProviderBase - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> +#[cfg(feature = "std")] +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider)> where DataProvider: ElectionDataProvider, - MaxWinners: Get, { type AccountId = AccountId; type BlockNumber = BlockNumber; type Error = &'static str; - type MaxWinners = MaxWinners; type DataProvider = DataProvider; -} -impl ElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> -where - DataProvider: ElectionDataProvider, - MaxWinners: Get, -{ - fn ongoing() -> bool { - false - } - - fn elect() -> Result, Self::Error> { - Err("`NoElection` cannot do anything.") - } -} - -impl InstantElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> -where - DataProvider: ElectionDataProvider, - MaxWinners: Get, -{ - fn instant_elect( - _: Option, - _: Option, - ) -> Result, Self::Error> { - Err("`NoElection` cannot do anything.") + fn elect() -> Result, Self::Error> { + Err(" cannot do anything.") } } @@ -564,13 +513,15 @@ pub trait SortedListProvider { /// unbounded amount of storage accesses. fn unsafe_clear(); - /// Check internal state of list. Only meant for debugging. - fn try_state() -> Result<(), &'static str>; + /// Sanity check internal state of list. Only meant for debug compilation. + fn sanity_check() -> Result<(), &'static str>; /// If `who` changes by the returned amount they are guaranteed to have a worst case change /// in their list position. 
#[cfg(feature = "runtime-benchmarks")] - fn score_update_worst_case(_who: &AccountId, _is_increase: bool) -> Self::Score; + fn score_update_worst_case(_who: &AccountId, _is_increase: bool) -> Self::Score { + Self::Score::max_value() + } } /// Something that can provide the `Score` of an account. Similar to [`ElectionProvider`] and @@ -582,8 +533,8 @@ pub trait ScoreProvider { /// Get the current `Score` of `who`. fn score(who: &AccountId) -> Self::Score; - /// For tests, benchmarks and fuzzing, set the `score`. - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] + /// For tests and benchmarks, set the `score`. + #[cfg(any(feature = "runtime-benchmarks", test))] fn set_score_of(_: &AccountId, _: Self::Score) {} } @@ -667,9 +618,3 @@ pub type Voter = (AccountId, VoteWeight, BoundedVec = Voter<::AccountId, ::MaxVotesPerVoter>; - -/// Same as `BoundedSupports` but parameterized by a `ElectionProviderBase`. -pub type BoundedSupportsOf = BoundedSupports< - ::AccountId, - ::MaxWinners, ->; diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 483c402fe249c..62e76c3888822 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,13 +20,10 @@ //! careful when using it onchain. use crate::{ - BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, - InstantElectionProvider, NposSolver, WeightInfo, -}; -use frame_support::{dispatch::DispatchClass, traits::Get}; -use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, to_supports, BoundedSupports, ElectionResult, VoteWeight, + Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolver, WeightInfo, }; +use frame_support::{traits::Get, weights::DispatchClass}; +use sp_npos_elections::*; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; /// Errors of the on-chain election. 
@@ -36,9 +33,6 @@ pub enum Error { NposElections(sp_npos_elections::Error), /// Errors from the data provider. DataProvider(&'static str), - /// Configurational error caused by `desired_targets` requested by data provider exceeding - /// `MaxWinners`. - TooManyWinners, } impl From for Error { @@ -49,71 +43,65 @@ impl From for Error { /// A simple on-chain implementation of the election provider trait. /// -/// This implements both `ElectionProvider` and `InstantElectionProvider`. +/// This will accept voting data on the fly and produce the results immediately. +/// +/// The [`ElectionProvider`] implementation of this type does not impose any dynamic limits on the +/// number of voters and targets that are fetched. This could potentially make this unsuitable for +/// execution onchain. One could, however, impose bounds on it by using `BoundedExecution` using the +/// `MaxVoters` and `MaxTargets` bonds in the `BoundedConfig` trait. +/// +/// On the other hand, the [`InstantElectionProvider`] implementation does limit these inputs +/// dynamically. If you use `elect_with_bounds` along with `InstantElectionProvider`, the bound that +/// would be used is the minimum of the dynamic bounds given as arguments to `elect_with_bounds` and +/// the trait bounds (`MaxVoters` and `MaxTargets`). /// -/// This type has some utilities to make it safe. Nonetheless, it should be used with utmost care. A -/// thoughtful value must be set as [`Config::VotersBound`] and [`Config::TargetsBound`] to ensure -/// the size of the input is sensible. -pub struct OnChainExecution(PhantomData); +/// Please use `BoundedExecution` at all times except at genesis or for testing, with thoughtful +/// bounds in order to bound the potential execution time. Limit the use `UnboundedExecution` at +/// genesis or for testing, as it does not bound the inputs. However, this can be used with +/// `[InstantElectionProvider::elect_with_bounds`] that dynamically imposes limits. 
+pub struct BoundedExecution(PhantomData); -#[deprecated(note = "use OnChainExecution, which is bounded by default")] -pub type BoundedExecution = OnChainExecution; +/// An unbounded variant of [`BoundedExecution`]. +/// +/// ### Warning +/// +/// This can be very expensive to run frequently on-chain. Use with care. +pub struct UnboundedExecution(PhantomData); /// Configuration trait for an onchain election execution. pub trait Config { /// Needed for weight registration. type System: frame_system::Config; - /// `NposSolver` that should be used, an example would be `PhragMMS`. type Solver: NposSolver< AccountId = ::AccountId, Error = sp_npos_elections::Error, >; - /// Something that provides the data for election. type DataProvider: ElectionDataProvider< AccountId = ::AccountId, BlockNumber = ::BlockNumber, >; - /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; +} - /// Upper bound on maximum winners from electable targets. - /// - /// As noted in the documentation of [`ElectionProviderBase::MaxWinners`], this value should - /// always be more than `DataProvider::desired_target`. - type MaxWinners: Get; - - /// Bounds the number of voters, when calling into [`Config::DataProvider`]. It might be - /// overwritten in the `InstantElectionProvider` impl. +pub trait BoundedConfig: Config { + /// Bounds the number of voters. type VotersBound: Get; - - /// Bounds the number of targets, when calling into [`Config::DataProvider`]. It might be - /// overwritten in the `InstantElectionProvider` impl. + /// Bounds the number of targets. type TargetsBound: Get; } -/// Same as `BoundedSupportsOf` but for `onchain::Config`. 
-pub type OnChainBoundedSupportsOf = BoundedSupports< - <::System as frame_system::Config>::AccountId, - ::MaxWinners, ->; - -fn elect_with_input_bounds( +fn elect_with( maybe_max_voters: Option, maybe_max_targets: Option, -) -> Result, Error> { +) -> Result::AccountId>, Error> { let voters = T::DataProvider::electing_voters(maybe_max_voters).map_err(Error::DataProvider)?; let targets = T::DataProvider::electable_targets(maybe_max_targets).map_err(Error::DataProvider)?; let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; - if desired_targets > T::MaxWinners::get() { - // early exit - return Err(Error::TooManyWinners) - } - let voters_len = voters.len() as u32; let targets_len = targets.len() as u32; @@ -141,43 +129,57 @@ fn elect_with_input_bounds( DispatchClass::Mandatory, ); - // defensive: Since npos solver returns a result always bounded by `desired_targets`, this is - // never expected to happen as long as npos solver does what is expected for it to do. 
- let supports: OnChainBoundedSupportsOf = - to_supports(&staked).try_into().map_err(|_| Error::TooManyWinners)?; - - Ok(supports) + Ok(to_supports(&staked)) } -impl ElectionProviderBase for OnChainExecution { +impl ElectionProvider for UnboundedExecution { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; type Error = Error; - type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; + + fn elect() -> Result, Self::Error> { + // This should not be called if not in `std` mode (and therefore neither in genesis nor in + // testing) + if cfg!(not(feature = "std")) { + frame_support::log::error!( + "Please use `InstantElectionProvider` instead to provide bounds on election if not in \ + genesis or testing mode" + ); + } + + elect_with::(None, None) + } } -impl InstantElectionProvider for OnChainExecution { - fn instant_elect( - forced_input_voters_bound: Option, - forced_input_target_bound: Option, - ) -> Result, Self::Error> { - elect_with_input_bounds::( - Some(T::VotersBound::get().min(forced_input_voters_bound.unwrap_or(u32::MAX)) as usize), - Some(T::TargetsBound::get().min(forced_input_target_bound.unwrap_or(u32::MAX)) as usize), - ) +impl InstantElectionProvider for UnboundedExecution { + fn elect_with_bounds( + max_voters: usize, + max_targets: usize, + ) -> Result, Self::Error> { + elect_with::(Some(max_voters), Some(max_targets)) } } -impl ElectionProvider for OnChainExecution { - fn ongoing() -> bool { - false +impl ElectionProvider for BoundedExecution { + type AccountId = ::AccountId; + type BlockNumber = ::BlockNumber; + type Error = Error; + type DataProvider = T::DataProvider; + + fn elect() -> Result, Self::Error> { + elect_with::(Some(T::VotersBound::get() as usize), Some(T::TargetsBound::get() as usize)) } +} - fn elect() -> Result, Self::Error> { - elect_with_input_bounds::( - Some(T::VotersBound::get() as usize), - Some(T::TargetsBound::get() as usize), +impl InstantElectionProvider for BoundedExecution { + fn 
elect_with_bounds( + max_voters: usize, + max_targets: usize, + ) -> Result, Self::Error> { + elect_with::( + Some(max_voters.min(T::VotersBound::get() as usize)), + Some(max_targets.min(T::TargetsBound::get() as usize)), ) } } @@ -185,8 +187,8 @@ impl ElectionProvider for OnChainExecution { #[cfg(test)] mod tests { use super::*; - use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; - use frame_support::{assert_noop, parameter_types, traits::ConstU32}; + use crate::{PhragMMS, SequentialPhragmen}; + use frame_support::traits::ConstU32; use sp_npos_elections::Support; use sp_runtime::Perbill; type AccountId = u64; @@ -209,16 +211,16 @@ mod tests { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountId; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = (); + type Event = (); type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -236,17 +238,14 @@ mod tests { struct PhragmenParams; struct PhragMMSParams; - parameter_types! 
{ - pub static MaxWinners: u32 = 10; - pub static DesiredTargets: u32 = 2; - } - impl Config for PhragmenParams { type System = Runtime; type Solver = SequentialPhragmen; type DataProvider = mock_data_provider::DataProvider; type WeightInfo = (); - type MaxWinners = MaxWinners; + } + + impl BoundedConfig for PhragmenParams { type VotersBound = ConstU32<600>; type TargetsBound = ConstU32<400>; } @@ -256,7 +255,9 @@ mod tests { type Solver = PhragMMS; type DataProvider = mock_data_provider::DataProvider; type WeightInfo = (); - type MaxWinners = MaxWinners; + } + + impl BoundedConfig for PhragMMSParams { type VotersBound = ConstU32<600>; type TargetsBound = ConstU32<400>; } @@ -285,7 +286,7 @@ mod tests { } fn desired_targets() -> data_provider::Result { - Ok(DesiredTargets::get()) + Ok(2) } fn next_election_prediction(_: BlockNumber) -> BlockNumber { @@ -298,7 +299,7 @@ mod tests { fn onchain_seq_phragmen_works() { sp_io::TestExternalities::new_empty().execute_with(|| { assert_eq!( - as ElectionProvider>::elect().unwrap(), + BoundedExecution::::elect().unwrap(), vec![ (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) @@ -307,25 +308,11 @@ mod tests { }) } - #[test] - fn too_many_winners_when_desired_targets_exceed_max_winners() { - sp_io::TestExternalities::new_empty().execute_with(|| { - // given desired targets larger than max winners - DesiredTargets::set(10); - MaxWinners::set(9); - - assert_noop!( - as ElectionProvider>::elect(), - Error::TooManyWinners, - ); - }) - } - #[test] fn onchain_phragmms_works() { sp_io::TestExternalities::new_empty().execute_with(|| { assert_eq!( - as ElectionProvider>::elect().unwrap(), + BoundedExecution::::elect().unwrap(), vec![ (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index 
44075ba871228..c603b196519b5 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -53,43 +53,43 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + (0 as Weight) // Standard Error: 667_000 - .saturating_add(Weight::from_ref_time(32_973_000 as u64).saturating_mul(v as u64)) + .saturating_add((32_973_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 1_334_000 - .saturating_add(Weight::from_ref_time(1_334_000 as u64).saturating_mul(t as u64)) + .saturating_add((1_334_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 60_644_000 - .saturating_add(Weight::from_ref_time(2_636_364_000 as u64).saturating_mul(d as u64)) + .saturating_add((2_636_364_000 as Weight).saturating_mul(d as Weight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + (0 as Weight) // Standard Error: 73_000 - .saturating_add(Weight::from_ref_time(21_073_000 as u64).saturating_mul(v as u64)) + .saturating_add((21_073_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 146_000 - .saturating_add(Weight::from_ref_time(65_000 as u64).saturating_mul(t as u64)) + .saturating_add((65_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 6_649_000 - .saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64)) + .saturating_add((1_711_424_000 as Weight).saturating_mul(d as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + (0 as Weight) // Standard Error: 667_000 - .saturating_add(Weight::from_ref_time(32_973_000 as u64).saturating_mul(v as u64)) + .saturating_add((32_973_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 1_334_000 - .saturating_add(Weight::from_ref_time(1_334_000 as 
u64).saturating_mul(t as u64)) + .saturating_add((1_334_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 60_644_000 - .saturating_add(Weight::from_ref_time(2_636_364_000 as u64).saturating_mul(d as u64)) + .saturating_add((2_636_364_000 as Weight).saturating_mul(d as Weight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + (0 as Weight) // Standard Error: 73_000 - .saturating_add(Weight::from_ref_time(21_073_000 as u64).saturating_mul(v as u64)) + .saturating_add((21_073_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 146_000 - .saturating_add(Weight::from_ref_time(65_000 as u64).saturating_mul(t as u64)) + .saturating_add((65_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 6_649_000 - .saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64)) + .saturating_add((1_711_424_000 as Weight).saturating_mul(d as Weight)) } } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 2d71a6bed39df..d71a74f76a114 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -36,7 +36,6 @@ substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 06ac8d7c60162..9a9d448449eca 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -29,12 +29,14 @@ use crate::Pallet as Elections; const BALANCE_FACTOR: u32 = 250; +type Lookup = <::Lookup as StaticLookup>::Source; + /// grab new account with infinite balance. 
fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); // Fund each account with at-least his stake but still a sane amount as to not mess up // the vote calculation. - let amount = default_stake::(T::MaxVoters::get()) * BalanceOf::::from(BALANCE_FACTOR); + let amount = default_stake::(MAX_VOTERS) * BalanceOf::::from(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); // important to increase the total issuance since T::CurrencyToVote will need it to be sane for // phragmen to work. @@ -44,7 +46,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { } /// Account to lookup type of system trait. -fn as_lookup(account: T::AccountId) -> AccountIdLookupOf { +fn as_lookup(account: T::AccountId) -> Lookup { T::Lookup::unlookup(account) } @@ -228,7 +230,7 @@ benchmarks! { submit_candidacy { // number of already existing candidates. - let c in 1 .. T::MaxCandidates::get(); + let c in 1 .. MAX_CANDIDATES; // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -259,7 +261,7 @@ benchmarks! { // this will check members, runners-up and candidate for removal. Members and runners-up are // limited by the runtime bound, nonetheless we fill them by `m`. // number of already existing candidates. - let c in 1 .. T::MaxCandidates::get(); + let c in 1 .. MAX_CANDIDATES; // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -360,14 +362,14 @@ benchmarks! { clean_defunct_voters { // total number of voters. - let v in (T::MaxVoters::get() / 2) .. T::MaxVoters::get(); + let v in (MAX_VOTERS / 2) .. MAX_VOTERS; // those that are defunct and need removal. - let d in 0 .. (T::MaxVoters::get() / 2); + let d in 1 .. 
(MAX_VOTERS / 2); // remove any previous stuff. clean::(); - let all_candidates = submit_candidates::(T::MaxCandidates::get(), "candidates")?; + let all_candidates = submit_candidates::(MAX_CANDIDATES, "candidates")?; distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; // all candidates leave. @@ -387,9 +389,9 @@ benchmarks! { // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. // Yet, change the number of voters, candidates and edge per voter to see the impact. Note // that we give all candidates a self vote to make sure they are all considered. - let c in 1 .. T::MaxCandidates::get(); - let v in 1 .. T::MaxVoters::get(); - let e in (T::MaxVoters::get()) .. T::MaxVoters::get() as u32 * MAXIMUM_VOTE as u32; + let c in 1 .. MAX_CANDIDATES; + let v in 1 .. MAX_VOTERS; + let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; clean::(); // so we have a situation with v and e. we want e to basically always be in the range of `e @@ -423,9 +425,9 @@ benchmarks! { #[extra] election_phragmen_c_e { - let c in 1 .. T::MaxCandidates::get(); - let e in (T::MaxVoters::get()) .. T::MaxVoters::get() * MAXIMUM_VOTE as u32; - let fixed_v = T::MaxVoters::get(); + let c in 1 .. MAX_CANDIDATES; + let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; + let fixed_v = MAX_VOTERS; clean::(); let votes_per_voter = e / fixed_v; @@ -457,7 +459,7 @@ benchmarks! { #[extra] election_phragmen_v { let v in 4 .. 16; - let fixed_c = T::MaxCandidates::get() / 10; + let fixed_c = MAX_CANDIDATES / 10; let fixed_e = 64; clean::(); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 165a8fcab429b..28fed28f18e5c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -125,6 +125,17 @@ pub mod migrations; /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; +// Some safe temp values to make the wasm execution sane while we still use this pallet. 
+#[cfg(test)] +pub(crate) const MAX_CANDIDATES: u32 = 100; +#[cfg(not(test))] +pub(crate) const MAX_CANDIDATES: u32 = 1000; + +#[cfg(test)] +pub(crate) const MAX_VOTERS: u32 = 1000; +#[cfg(not(test))] +pub(crate) const MAX_VOTERS: u32 = 10 * 1000; + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency< @@ -176,8 +187,6 @@ pub struct SeatHolder { pub use pallet::*; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -195,7 +204,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Identifier for the elections-phragmen pallet's lock #[pallet::constant] @@ -250,21 +259,6 @@ pub mod pallet { #[pallet::constant] type TermDuration: Get; - /// The maximum number of candidates in a phragmen election. - /// - /// Warning: The election happens onchain, and this value will determine - /// the size of the election. When this limit is reached no more - /// candidates are accepted in the election. - #[pallet::constant] - type MaxCandidates: Get; - - /// The maximum number of voters to allow in a phragmen election. - /// - /// Warning: This impacts the size of the election which is run onchain. - /// When the limit is reached the new voters are ignored. - #[pallet::constant] - type MaxVoters: Get; - /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -279,7 +273,7 @@ pub mod pallet { if !term_duration.is_zero() && (n % term_duration).is_zero() { Self::do_phragmen() } else { - Weight::zero() + 0 } } } @@ -363,7 +357,7 @@ pub mod pallet { T::Currency::set_lock(T::PalletId::get(), &who, locked_stake, WithdrawReasons::all()); Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); - Ok(None::.into()) + Ok(None.into()) } /// Remove `origin` as a voter. 
@@ -372,11 +366,11 @@ pub mod pallet { /// /// The dispatch origin of this call must be signed and be a voter. #[pallet::weight(T::WeightInfo::remove_voter())] - pub fn remove_voter(origin: OriginFor) -> DispatchResult { + pub fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); Self::do_remove_voter(&who); - Ok(()) + Ok(None.into()) } /// Submit oneself for candidacy. A fixed amount of deposit is recorded. @@ -398,15 +392,12 @@ pub mod pallet { pub fn submit_candidacy( origin: OriginFor, #[pallet::compact] candidate_count: u32, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let actual_count = >::decode_len().unwrap_or(0) as u32; ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); - ensure!( - actual_count <= ::MaxCandidates::get(), - Error::::TooManyCandidates - ); + ensure!(actual_count <= MAX_CANDIDATES, Error::::TooManyCandidates); let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; @@ -417,7 +408,7 @@ pub mod pallet { .map_err(|_| Error::::InsufficientCandidateFunds)?; >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); - Ok(()) + Ok(None.into()) } /// Renounce one's intention to be a candidate for the next election round. 3 potential @@ -443,7 +434,10 @@ pub mod pallet { Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), })] - pub fn renounce_candidacy(origin: OriginFor, renouncing: Renouncing) -> DispatchResult { + pub fn renounce_candidacy( + origin: OriginFor, + renouncing: Renouncing, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; match renouncing { Renouncing::Member => { @@ -479,7 +473,7 @@ pub mod pallet { })?; }, }; - Ok(()) + Ok(None.into()) } /// Remove a particular member from the set. 
This is effective immediately and the bond of @@ -507,10 +501,10 @@ pub mod pallet { })] pub fn remove_member( origin: OriginFor, - who: AccountIdLookupOf, + who: ::Source, slash_bond: bool, rerun_election: bool, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; @@ -522,7 +516,7 @@ pub mod pallet { } // no refund needed. - Ok(()) + Ok(None.into()) } /// Clean all voters who are defunct (i.e. they do not serve any purpose at all). The @@ -540,13 +534,13 @@ pub mod pallet { origin: OriginFor, _num_voters: u32, _num_defunct: u32, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let _ = ensure_root(origin)?; >::iter() .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) .for_each(|(dv, _)| Self::do_remove_voter(&dv)); - Ok(()) + Ok(None.into()) } } @@ -919,11 +913,10 @@ impl Pallet { let mut num_edges: u32 = 0; - let max_voters = ::MaxVoters::get() as usize; // used for prime election. let mut voters_and_stakes = Vec::new(); match Voting::::iter().try_for_each(|(voter, Voter { stake, votes, .. })| { - if voters_and_stakes.len() < max_voters { + if voters_and_stakes.len() < MAX_VOTERS as usize { voters_and_stakes.push((voter, stake, votes)); Ok(()) } else { @@ -937,7 +930,7 @@ impl Pallet { "Failed to run election. Number of voters exceeded", ); Self::deposit_event(Event::ElectionError); - return T::DbWeight::get().reads(3 + max_voters as u64) + return T::DbWeight::get().reads(3 + MAX_VOTERS as u64) }, } @@ -1174,9 +1167,7 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -1184,16 +1175,16 @@ mod tests { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -1208,7 +1199,7 @@ mod tests { impl pallet_balances::Config for Test { type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = frame_system::Pallet; @@ -1275,13 +1266,11 @@ mod tests { parameter_types! 
{ pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; - pub const PhragmenMaxVoters: u32 = 1000; - pub const PhragmenMaxCandidates: u32 = 100; } impl Config for Test { type PalletId = ElectionsPhragmenPalletId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type ChangeMembers = TestChangeMembers; @@ -1295,13 +1284,10 @@ mod tests { type LoserCandidate = (); type KickedMember = (); type WeightInfo = (); - type MaxVoters = PhragmenMaxVoters; - type MaxCandidates = PhragmenMaxCandidates; } pub type Block = sp_runtime::generic::Block; - pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test where @@ -1344,7 +1330,9 @@ mod tests { self } pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { - MEMBERS.with(|m| *m.borrow_mut() = members.iter().map(|(m, _)| *m).collect::>()); + MEMBERS.with(|m| { + *m.borrow_mut() = members.iter().map(|(m, _)| m.clone()).collect::>() + }); self.genesis_members = members; self } @@ -1359,7 +1347,8 @@ mod tests { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { sp_tracing::try_init_simple(); MEMBERS.with(|m| { - *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| *m).collect::>() + *m.borrow_mut() = + self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>() }); let mut ext: sp_io::TestExternalities = GenesisConfig { balances: pallet_balances::GenesisConfig:: { @@ -1488,11 +1477,11 @@ mod tests { ensure_members_has_approval_stake(); } - fn submit_candidacy(origin: RuntimeOrigin) -> sp_runtime::DispatchResult { + fn submit_candidacy(origin: Origin) -> DispatchResultWithPostInfo { Elections::submit_candidacy(origin, Elections::candidates().len() as u32) } - fn vote(origin: RuntimeOrigin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { + 
fn vote(origin: Origin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { // historical note: helper function was created in a period of time in which the API of vote // call was changing. Currently it is a wrapper for the original call and does not do much. // Nonetheless, totally harmless. @@ -1574,8 +1563,8 @@ mod tests { Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(1))); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(1))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); assert_eq!(Elections::voting(1), Default::default()); assert_eq!(Elections::voting(2), Default::default()); @@ -1668,7 +1657,7 @@ mod tests { assert!(Elections::is_candidate(&2).is_err()); assert_eq!(balances(&1), (10, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); + assert_ok!(submit_candidacy(Origin::signed(1))); assert_eq!(balances(&1), (7, 3)); assert_eq!(candidate_ids(), vec![1]); @@ -1677,7 +1666,7 @@ mod tests { assert!(Elections::is_candidate(&2).is_err()); assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(balances(&2), (17, 3)); assert_eq!(candidate_ids(), vec![1, 2]); @@ -1694,15 +1683,15 @@ mod tests { #[test] fn updating_candidacy_bond_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); assert_eq!(Elections::candidates(), vec![(5, 3)]); // a runtime upgrade changes the bond. 
CANDIDACY_BOND.with(|v| *v.borrow_mut() = 4); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_eq!(Elections::candidates(), vec![(4, 4), (5, 3)]); // once elected, they each hold their candidacy bond, no more. @@ -1726,13 +1715,13 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(candidate_ids(), Vec::::new()); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(3))); assert_eq!(candidate_ids(), vec![3]); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); + assert_ok!(submit_candidacy(Origin::signed(1))); assert_eq!(candidate_ids(), vec![1, 3]); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(candidate_ids(), vec![1, 2, 3]); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(4))); assert_eq!(candidate_ids(), vec![1, 2, 3, 4]); }); } @@ -1741,12 +1730,9 @@ mod tests { fn dupe_candidate_submission_should_not_work() { ExtBuilder::default().build_and_execute(|| { assert_eq!(candidate_ids(), Vec::::new()); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); + assert_ok!(submit_candidacy(Origin::signed(1))); assert_eq!(candidate_ids(), vec![1]); - assert_noop!( - submit_candidacy(RuntimeOrigin::signed(1)), - Error::::DuplicatedCandidate - ); + assert_noop!(submit_candidacy(Origin::signed(1)), Error::::DuplicatedCandidate); }); } @@ -1754,8 +1740,8 @@ mod tests { fn member_candidacy_submission_should_not_work() { // critically important to make sure that outgoing candidates and losers are not mixed up. 
ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1764,19 +1750,19 @@ mod tests { assert!(Elections::runners_up().is_empty()); assert!(candidate_ids().is_empty()); - assert_noop!(submit_candidacy(RuntimeOrigin::signed(5)), Error::::MemberSubmit); + assert_noop!(submit_candidacy(Origin::signed(5)), Error::::MemberSubmit); }); } #[test] fn runner_candidate_submission_should_not_work() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 20)); - assert_ok!(vote(RuntimeOrigin::signed(1), vec![3], 10)); + assert_ok!(vote(Origin::signed(2), vec![5, 4], 20)); + assert_ok!(vote(Origin::signed(1), vec![3], 10)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1784,7 +1770,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![3]); - assert_noop!(submit_candidacy(RuntimeOrigin::signed(3)), Error::::RunnerUpSubmit); + assert_noop!(submit_candidacy(Origin::signed(3)), Error::::RunnerUpSubmit); }); } @@ -1793,7 +1779,7 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(candidate_ids(), Vec::::new()); assert_noop!( - submit_candidacy(RuntimeOrigin::signed(7)), + submit_candidacy(Origin::signed(7)), Error::::InsufficientCandidateFunds, ); }); @@ -1805,8 +1791,8 @@ mod tests { assert_eq!(candidate_ids(), 
Vec::::new()); assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 18); @@ -1819,8 +1805,8 @@ mod tests { assert_eq!(candidate_ids(), Vec::::new()); assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 12)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(2), vec![5], 12)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 12); @@ -1832,9 +1818,9 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); // User only locks up to their free balance. assert_eq!(balances(&2), (18, 2)); @@ -1842,7 +1828,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 18); // can update; different stake; different lock and reserve. 
- assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 15)); + assert_ok!(vote(Origin::signed(2), vec![5, 4], 15)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 15); assert_eq!(locked_stake_of(&2), 15); @@ -1852,10 +1838,10 @@ mod tests { #[test] fn updated_voting_bond_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(5))); assert_eq!(balances(&2), (20, 0)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 5)); + assert_ok!(vote(Origin::signed(2), vec![5], 5)); assert_eq!(balances(&2), (18, 2)); assert_eq!(voter_deposit(&2), 2); @@ -1865,11 +1851,11 @@ mod tests { // proof that bond changed. assert_eq!(balances(&1), (10, 0)); - assert_ok!(vote(RuntimeOrigin::signed(1), vec![5], 5)); + assert_ok!(vote(Origin::signed(1), vec![5], 5)); assert_eq!(balances(&1), (9, 1)); assert_eq!(voter_deposit(&1), 1); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); assert_eq!(balances(&2), (20, 0)); }) } @@ -1879,11 +1865,11 @@ mod tests { ExtBuilder::default().voter_bond_factor(1).build_and_execute(|| { assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); // initial vote. - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 10)); + assert_ok!(vote(Origin::signed(2), vec![5], 10)); // 2 + 1 assert_eq!(balances(&2), (17, 3)); @@ -1892,7 +1878,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 10); // can update; different stake; different lock and reserve. 
- assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 15)); + assert_ok!(vote(Origin::signed(2), vec![5, 4], 15)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); assert_eq!(Elections::voting(&2).deposit, 4); @@ -1900,7 +1886,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 15); // stay at two votes with different stake. - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 3], 18)); + assert_ok!(vote(Origin::signed(2), vec![5, 3], 18)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); assert_eq!(Elections::voting(&2).deposit, 4); @@ -1908,7 +1894,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 16); // back to 1 vote. - assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 12)); + assert_ok!(vote(Origin::signed(2), vec![4], 12)); // 2 + 1 assert_eq!(balances(&2), (17, 3)); assert_eq!(Elections::voting(&2).deposit, 3); @@ -1920,17 +1906,17 @@ mod tests { #[test] fn cannot_vote_for_no_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_noop!(vote(RuntimeOrigin::signed(2), vec![], 20), Error::::NoVotes); + assert_noop!(vote(Origin::signed(2), vec![], 20), Error::::NoVotes); }); } #[test] fn can_vote_for_old_members_even_when_no_new_candidates() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![4, 5], 20)); + assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1938,22 +1924,22 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert!(candidate_ids().is_empty()); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![4, 5], 10)); + assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); }); } #[test] fn prime_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1961,7 +1947,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert!(candidate_ids().is_empty()); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![4, 5], 10)); + assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); }); } @@ -1969,20 +1955,17 @@ mod tests { #[test] fn prime_votes_for_exiting_members_are_removed() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(1), vec![4, 
3], 10)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(4), - Renouncing::Candidate(3) - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1997,18 +1980,18 @@ mod tests { #[test] fn prime_is_kept_if_other_members_leave() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(members_ids(), vec![5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); @@ -2018,18 +2001,18 @@ mod tests { #[test] fn prime_is_gone_if_renouncing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 
40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member)); assert_eq!(members_ids(), vec![4]); assert_eq!(PRIME.with(|p| *p.borrow()), None); @@ -2043,29 +2026,29 @@ mod tests { .balance_factor(10) .build_and_execute(|| { // when we have only candidates - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); assert_noop!( // content of the vote is irrelevant. - vote(RuntimeOrigin::signed(1), vec![9, 99, 999, 9999], 5), + vote(Origin::signed(1), vec![9, 99, 999, 9999], 5), Error::::TooManyVotes, ); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); // now we have 2 members, 1 runner-up, and 1 new candidate - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(1), vec![9, 99, 999, 9999], 5)); + assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5)); assert_noop!( - vote(RuntimeOrigin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), + vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), 
Error::::TooManyVotes, ); }); @@ -2074,23 +2057,23 @@ mod tests { #[test] fn cannot_vote_for_less_than_ed() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); - assert_noop!(vote(RuntimeOrigin::signed(2), vec![4], 1), Error::::LowBalance); + assert_noop!(vote(Origin::signed(2), vec![4], 1), Error::::LowBalance); }) } #[test] fn can_vote_for_more_than_free_balance_but_moot() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); // User has 100 free and 50 reserved. - assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 100, 50)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 100, 50)); // User tries to vote with 150 tokens. - assert_ok!(vote(RuntimeOrigin::signed(2), vec![4, 5], 150)); + assert_ok!(vote(Origin::signed(2), vec![4, 5], 150)); // We truncate to only their free balance, after reserving additional for voting. 
assert_eq!(locked_stake_of(&2), 98); assert_eq!(has_lock(&2), 98); @@ -2100,10 +2083,10 @@ mod tests { #[test] fn remove_voter_should_work() { ExtBuilder::default().voter_bond(8).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![5], 30)); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(vote(Origin::signed(3), vec![5], 30)); assert_eq_uvec!(all_voters(), vec![2, 3]); assert_eq!(balances(&2), (12, 8)); @@ -2113,7 +2096,7 @@ mod tests { assert_eq!(votes_of(&2), vec![5]); assert_eq!(votes_of(&3), vec![5]); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); assert_eq_uvec!(all_voters(), vec![3]); assert!(votes_of(&2).is_empty()); @@ -2127,41 +2110,35 @@ mod tests { #[test] fn non_voter_remove_should_not_work() { ExtBuilder::default().build_and_execute(|| { - assert_noop!( - Elections::remove_voter(RuntimeOrigin::signed(3)), - Error::::MustBeVoter - ); + assert_noop!(Elections::remove_voter(Origin::signed(3)), Error::::MustBeVoter); }); } #[test] fn dupe_remove_should_fail() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); assert!(all_voters().is_empty()); - assert_noop!( - Elections::remove_voter(RuntimeOrigin::signed(2)), - Error::::MustBeVoter - ); + assert_noop!(Elections::remove_voter(Origin::signed(2)), Error::::MustBeVoter); }); } #[test] fn removed_voter_should_not_be_counted() { ExtBuilder::default().build_and_execute(|| { - 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); + assert_ok!(Elections::remove_voter(Origin::signed(4))); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2173,13 +2150,13 @@ mod tests { #[test] fn simple_voting_rounds_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 15)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 15)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); assert_eq_uvec!(all_voters(), vec![2, 3, 4]); @@ -2216,36 +2193,36 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(RuntimeEvent::Elections(super::Event::EmptyTerm)); + System::assert_last_event(Event::Elections(super::Event::EmptyTerm)); }) } #[test] fn all_outgoing() { 
ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { + System::assert_last_event(Event::Elections(super::Event::NewTerm { new_members: vec![(4, 35), (5, 45)], })); assert_eq!(members_and_stake(), vec![(4, 35), (5, 45)]); assert_eq!(runners_up_and_stake(), vec![]); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(5))); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); + assert_ok!(Elections::remove_voter(Origin::signed(5))); + assert_ok!(Elections::remove_voter(Origin::signed(4))); System::set_block_number(10); Elections::on_initialize(System::block_number()); - System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { + System::assert_last_event(Event::Elections(super::Event::NewTerm { new_members: vec![], })); @@ -2258,11 +2235,11 @@ mod tests { #[test] fn defunct_voter_will_be_counted() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(5))); // This guy's vote is pointless for this round. 
- assert_ok!(vote(RuntimeOrigin::signed(3), vec![4], 30)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(3), vec![4], 30)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2271,7 +2248,7 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); // but now it has a valid target. - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(4))); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2286,15 +2263,15 @@ mod tests { #[test] fn only_desired_seats_are_chosen() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2307,8 +2284,8 @@ mod tests { #[test] fn phragmen_should_not_self_vote() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + 
assert_ok!(submit_candidacy(Origin::signed(4))); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2317,7 +2294,7 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); assert!(members_ids().is_empty()); - System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { + System::assert_last_event(Event::Elections(super::Event::NewTerm { new_members: vec![], })); }); @@ -2326,15 +2303,15 @@ mod tests { #[test] fn runners_up_should_be_kept() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![3], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![2], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 50)); + assert_ok!(vote(Origin::signed(2), vec![3], 20)); + assert_ok!(vote(Origin::signed(3), vec![2], 30)); + assert_ok!(vote(Origin::signed(4), vec![5], 40)); + assert_ok!(vote(Origin::signed(5), vec![4], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2353,22 +2330,22 @@ mod tests { #[test] fn runners_up_should_be_next_candidates() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + 
assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_and_stake(), vec![(4, 35), (5, 45)]); assert_eq!(runners_up_and_stake(), vec![(2, 15), (3, 25)]); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 10)); + assert_ok!(vote(Origin::signed(5), vec![5], 10)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2381,13 +2358,13 @@ mod tests { #[test] fn runners_up_lose_bond_once_outgoing() { ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2395,8 +2372,8 @@ mod tests { assert_eq!(runners_up_ids(), vec![2]); assert_eq!(balances(&2), (15, 5)); - 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2411,17 +2388,17 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(balances(&5), (50, 0)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); assert_eq!(balances(&5), (45, 5)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![5]); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(5))); + assert_ok!(Elections::remove_voter(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); System::set_block_number(10); @@ -2435,10 +2412,10 @@ mod tests { #[test] fn candidates_lose_the_bond_when_outgoing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); + assert_ok!(vote(Origin::signed(4), vec![5], 40)); assert_eq!(balances(&5), (47, 3)); assert_eq!(balances(&3), (27, 3)); @@ -2458,11 +2435,11 @@ mod tests { #[test] fn current_members_are_always_next_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), 
vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2470,13 +2447,13 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(Elections::election_rounds(), 1); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); + assert_ok!(Elections::remove_voter(Origin::signed(4))); // 5 will persist as candidates despite not being in the list. assert_eq!(candidate_ids(), vec![2, 3]); @@ -2494,15 +2471,15 @@ mod tests { // what I mean by uninterrupted: // given no input or stimulants the same members are re-elected. 
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); let check_at_block = |b: u32| { System::set_block_number(b.into()); @@ -2527,11 +2504,11 @@ mod tests { #[test] fn remove_members_triggers_election() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2539,10 +2516,10 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); // a new candidate - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); - 
assert_ok!(Elections::remove_member(RuntimeOrigin::root(), 4, true, true)); + assert_ok!(Elections::remove_member(Origin::root(), 4, true, true)); assert_eq!(balances(&4), (35, 2)); // slashed assert_eq!(Elections::election_rounds(), 2); // new election round @@ -2553,14 +2530,14 @@ mod tests { #[test] fn seats_should_be_released_when_no_vote() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![3], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(2), vec![3], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); assert_eq!(>::decode_len().unwrap(), 3); @@ -2571,10 +2548,10 @@ mod tests { assert_eq!(members_ids(), vec![3, 5]); assert_eq!(Elections::election_rounds(), 1); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(3))); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); - assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(5))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(3))); + assert_ok!(Elections::remove_voter(Origin::signed(4))); + assert_ok!(Elections::remove_voter(Origin::signed(5))); // meanwhile, no one cares to become a candidate again. 
System::set_block_number(10); @@ -2587,29 +2564,29 @@ mod tests { #[test] fn incoming_outgoing_are_reported() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(1))); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(3))); // 5 will change their vote and becomes an `outgoing` - assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 8)); + assert_ok!(vote(Origin::signed(5), vec![4], 8)); // 4 will stay in the set - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); // 3 will become a winner - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); // these two are losers. - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); - assert_ok!(vote(RuntimeOrigin::signed(1), vec![1], 10)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(1), vec![1], 10)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2625,7 +2602,7 @@ mod tests { // 5 is an outgoing loser. will also get slashed. 
assert_eq!(balances(&5), (45, 2)); - System::assert_has_event(RuntimeEvent::Elections(super::Event::NewTerm { + System::assert_has_event(Event::Elections(super::Event::NewTerm { new_members: vec![(4, 35), (5, 45)], })); }) @@ -2634,12 +2611,12 @@ mod tests { #[test] fn invalid_votes_are_moot() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![10], 50)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![10], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2652,15 +2629,15 @@ mod tests { #[test] fn members_are_sorted_based_on_id_runners_on_merit() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![3], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![2], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 50)); + assert_ok!(vote(Origin::signed(2), vec![3], 20)); + assert_ok!(vote(Origin::signed(3), vec![2], 30)); + assert_ok!(vote(Origin::signed(4), vec![5], 40)); + 
assert_ok!(vote(Origin::signed(5), vec![4], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2674,19 +2651,19 @@ mod tests { #[test] fn runner_up_replacement_maintains_members_order() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![2], 50)); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![2], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(RuntimeOrigin::root(), 2, true, false)); + assert_ok!(Elections::remove_member(Origin::root(), 2, true, false)); assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2694,15 +2671,15 @@ mod tests { #[test] fn can_renounce_candidacy_member_with_runners_bond_is_refunded() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); 
- assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2710,7 +2687,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![2, 3]); - assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. assert_eq!(members_ids(), vec![3, 5]); @@ -2721,11 +2698,11 @@ mod tests { #[test] fn can_renounce_candidacy_member_without_runners_bond_is_refunded() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2733,7 +2710,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert!(runners_up_ids().is_empty()); - assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
// no replacement @@ -2745,15 +2722,15 @@ mod tests { #[test] fn can_renounce_candidacy_runner_up() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(5), vec![4], 50)); + assert_ok!(vote(Origin::signed(4), vec![5], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2761,10 +2738,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![2, 3]); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(3), - Renouncing::RunnerUp - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. 
assert_eq!(members_ids(), vec![4, 5]); @@ -2775,25 +2749,22 @@ mod tests { #[test] fn runner_up_replacement_works_when_out_of_order() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![2], 50)); + assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![2], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![2, 4]); assert_eq!(runners_up_ids(), vec![5, 3]); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(3), - Renouncing::RunnerUp - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); assert_eq!(members_ids(), vec![2, 4]); assert_eq!(runners_up_ids(), vec![5]); }); @@ -2802,14 +2773,11 @@ mod tests { #[test] fn can_renounce_candidacy_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); assert_eq!(candidate_ids(), vec![5]); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(5), - Renouncing::Candidate(1) - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), 
Renouncing::Candidate(1))); assert_eq!(balances(&5), (50, 0)); assert!(candidate_ids().is_empty()); }) @@ -2819,15 +2787,15 @@ mod tests { fn wrong_renounce_candidacy_should_fail() { ExtBuilder::default().build_and_execute(|| { assert_noop!( - Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::Candidate(0)), + Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(0)), Error::::InvalidRenouncing, ); assert_noop!( - Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::Member), + Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member), Error::::InvalidRenouncing, ); assert_noop!( - Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::RunnerUp), + Elections::renounce_candidacy(Origin::signed(5), Renouncing::RunnerUp), Error::::InvalidRenouncing, ); }) @@ -2836,13 +2804,13 @@ mod tests { #[test] fn non_member_renounce_member_should_fail() { ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2851,7 +2819,7 @@ mod tests { assert_eq!(runners_up_ids(), vec![3]); assert_noop!( - Elections::renounce_candidacy(RuntimeOrigin::signed(3), Renouncing::Member), + Elections::renounce_candidacy(Origin::signed(3), Renouncing::Member), Error::::InvalidRenouncing, ); 
}) @@ -2860,13 +2828,13 @@ mod tests { #[test] fn non_runner_up_renounce_runner_up_should_fail() { ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2875,7 +2843,7 @@ mod tests { assert_eq!(runners_up_ids(), vec![3]); assert_noop!( - Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::RunnerUp), + Elections::renounce_candidacy(Origin::signed(4), Renouncing::RunnerUp), Error::::InvalidRenouncing, ); }) @@ -2884,33 +2852,27 @@ mod tests { #[test] fn wrong_candidate_count_renounce_should_fail() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); assert_noop!( - Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Candidate(2)), + Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2)), Error::::InvalidWitnessData, ); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(4), - Renouncing::Candidate(3) - )); + 
assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); }) } #[test] fn renounce_candidacy_count_can_overestimate() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); // while we have only 3 candidates. - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(4), - Renouncing::Candidate(4) - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(4))); }) } @@ -2920,13 +2882,13 @@ mod tests { .desired_runners_up(2) .desired_members(1) .build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 5)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 15)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 5)); + assert_ok!(vote(Origin::signed(3), vec![3], 15)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2934,8 +2896,8 @@ mod tests { assert_eq!(members_ids(), vec![5]); assert_eq!(runners_up_ids(), vec![4, 3]); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 10)); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(vote(Origin::signed(2), vec![2], 10)); System::set_block_number(10); 
Elections::on_initialize(System::block_number()); @@ -2956,13 +2918,13 @@ mod tests { .desired_runners_up(2) .desired_members(1) .build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2975,8 +2937,8 @@ mod tests { assert_eq!(balances(&2), (15, 5)); // this guy will shift everyone down. - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2999,13 +2961,13 @@ mod tests { .desired_runners_up(2) .desired_members(1) .build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), 
vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3018,8 +2980,8 @@ mod tests { assert_eq!(balances(&2), (15, 5)); // swap some votes. - assert_ok!(vote(RuntimeOrigin::signed(4), vec![2], 40)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 20)); + assert_ok!(vote(Origin::signed(4), vec![2], 40)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -3039,13 +3001,13 @@ mod tests { #[test] fn remove_and_replace_member_works() { let setup = || { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3066,10 +3028,7 @@ mod tests { // member removed, no replacement found. 
ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { setup(); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(3), - Renouncing::RunnerUp - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); assert_eq!(Elections::remove_and_replace_member(&4, false), Ok(false)); assert_eq!(members_ids(), vec![5]); @@ -3092,15 +3051,15 @@ mod tests { .build_and_execute(|| { assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3118,15 +3077,15 @@ mod tests { .build_and_execute(|| { assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + 
assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3144,15 +3103,15 @@ mod tests { .build_and_execute(|| { assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3167,17 +3126,17 @@ mod tests { #[test] fn dupe_vote_is_moot() { ExtBuilder::default().desired_members(1).build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(1))); // all these duplicate votes will not cause 2 to win. 
- assert_ok!(vote(RuntimeOrigin::signed(1), vec![2, 2, 2, 2], 5)); - assert_ok!(vote(RuntimeOrigin::signed(2), vec![2, 2, 2, 2], 20)); + assert_ok!(vote(Origin::signed(1), vec![2, 2, 2, 2], 5)); + assert_ok!(vote(Origin::signed(2), vec![2, 2, 2, 2], 20)); - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3189,33 +3148,24 @@ mod tests { #[test] fn remove_defunct_voter_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); // defunct - assert_ok!(vote(RuntimeOrigin::signed(5), vec![5, 4], 5)); + assert_ok!(vote(Origin::signed(5), vec![5, 4], 5)); // defunct - assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 5)); + assert_ok!(vote(Origin::signed(4), vec![4], 5)); // ok - assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 5)); + assert_ok!(vote(Origin::signed(3), vec![3], 5)); // ok - assert_ok!(vote(RuntimeOrigin::signed(2), vec![3, 4], 5)); - - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(5), - Renouncing::Candidate(3) - )); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(4), - Renouncing::Candidate(2) - )); - assert_ok!(Elections::renounce_candidacy( - RuntimeOrigin::signed(3), - Renouncing::Candidate(1) - )); - - assert_ok!(Elections::clean_defunct_voters(RuntimeOrigin::root(), 4, 2)); + assert_ok!(vote(Origin::signed(2), vec![3, 4], 5)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(3))); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2))); + 
assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(1))); + + assert_ok!(Elections::clean_defunct_voters(Origin::root(), 4, 2)); }) } } diff --git a/frame/elections-phragmen/src/migrations/v3.rs b/frame/elections-phragmen/src/migrations/v3.rs index 9ec9c6e7eea6c..b1cdd4be98541 100644 --- a/frame/elections-phragmen/src/migrations/v3.rs +++ b/frame/elections-phragmen/src/migrations/v3.rs @@ -101,14 +101,14 @@ pub fn apply( StorageVersion::new(3).put::>(); - Weight::MAX + Weight::max_value() } else { log::warn!( target: "runtime::elections-phragmen", "Attempted to apply migration to V3 but failed because storage version is {:?}", storage_version, ); - Weight::zero() + 0 } } diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs index 76ef630706c50..e0fc17ec2a12d 100644 --- a/frame/elections-phragmen/src/migrations/v4.rs +++ b/frame/elections-phragmen/src/migrations/v4.rs @@ -38,7 +38,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::elections-phragmen", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero() + return 0 } let storage_version = StorageVersion::get::>(); log::info!( @@ -63,7 +63,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { "Attempted to apply migration to v4 but failed because storage version is {:?}", storage_version, ); - Weight::zero() + 0 } } diff --git a/frame/elections-phragmen/src/migrations/v5.rs b/frame/elections-phragmen/src/migrations/v5.rs index eb35c1fae0f29..a9fb018ba0219 100644 --- a/frame/elections-phragmen/src/migrations/v5.rs +++ b/frame/elections-phragmen/src/migrations/v5.rs @@ -8,7 +8,7 @@ use super::super::*; /// situation where they could increase their free balance but still not be able to use their funds /// because they were less than the lock. 
pub fn migrate(to_migrate: Vec) -> Weight { - let mut weight = Weight::zero(); + let mut weight = 0; for who in to_migrate.iter() { if let Ok(mut voter) = Voting::::try_get(who) { diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index ddc55b08750d5..07ee7aa2012ad 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,24 +18,23 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-08-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_elections_phragmen // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet_elections_phragmen +// --chain=dev // --output=./frame/elections-phragmen/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -71,12 +70,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - // Minimum execution time: 38_496 nanoseconds. - Weight::from_ref_time(39_424_348 as u64) - // Standard Error: 3_547 - .saturating_add(Weight::from_ref_time(119_971 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (26_143_000 as Weight) + // Standard Error: 23_000 + .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -85,12 +83,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - // Minimum execution time: 49_459 nanoseconds. 
- Weight::from_ref_time(50_225_486 as u64) - // Standard Error: 3_160 - .saturating_add(Weight::from_ref_time(170_360 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (40_431_000 as Weight) + // Standard Error: 6_000 + .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -99,42 +96,38 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - // Minimum execution time: 48_712 nanoseconds. - Weight::from_ref_time(49_463_298 as u64) - // Standard Error: 2_678 - .saturating_add(Weight::from_ref_time(231_771 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (40_188_000 as Weight) + // Standard Error: 6_000 + .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - // Minimum execution time: 48_359 nanoseconds. - Weight::from_ref_time(48_767_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (38_031_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. 
fn submit_candidacy(c: u32, ) -> Weight { - // Minimum execution time: 43_369 nanoseconds. - Weight::from_ref_time(49_587_113 as u64) - // Standard Error: 1_008 - .saturating_add(Weight::from_ref_time(77_752 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (43_715_000 as Weight) + // Standard Error: 0 + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - // Minimum execution time: 41_321 nanoseconds. - Weight::from_ref_time(50_803_289 as u64) - // Standard Error: 1_159 - .saturating_add(Weight::from_ref_time(57_239 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (47_882_000 as Weight) + // Standard Error: 1_000 + .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -142,22 +135,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - // Minimum execution time: 53_542 nanoseconds. 
- Weight::from_ref_time(54_481_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (45_600_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - // Minimum execution time: 41_825 nanoseconds. - Weight::from_ref_time(42_248_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (34_959_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - // Minimum execution time: 2_000_000_000 nanoseconds. - Weight::from_ref_time(2_000_000_000_000 as u64) + (2_000_000_000_000 as Weight) } // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) @@ -166,10 +156,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - // Minimum execution time: 62_600 nanoseconds. - Weight::from_ref_time(63_152_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (52_684_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) @@ -178,15 +167,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:5000 w:5000) // Storage: System Account (r:5000 w:5000) /// The range of component `v` is `[5000, 10000]`. - /// The range of component `d` is `[0, 5000]`. + /// The range of component `d` is `[1, 5000]`. 
fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - // Minimum execution time: 297_149_264 nanoseconds. - Weight::from_ref_time(297_898_499_000 as u64) - // Standard Error: 263_819 - .saturating_add(Weight::from_ref_time(37_914_985 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) + (0 as Weight) + // Standard Error: 65_000 + .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) @@ -196,22 +184,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:1 w:1) + // Storage: System Account (r:19 w:19) /// The range of component `c` is `[1, 1000]`. /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - // Minimum execution time: 22_034_317 nanoseconds. 
- Weight::from_ref_time(22_110_020_000 as u64) - // Standard Error: 235_528 - .saturating_add(Weight::from_ref_time(25_553_585 as u64).saturating_mul(v as u64)) - // Standard Error: 15_114 - .saturating_add(Weight::from_ref_time(1_032_330 as u64).saturating_mul(e as u64)) - .saturating_add(T::DbWeight::get().reads(280 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(c as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) + (0 as Weight) + // Standard Error: 778_000 + .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(279 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) } } @@ -224,12 +210,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - // Minimum execution time: 38_496 nanoseconds. 
- Weight::from_ref_time(39_424_348 as u64) - // Standard Error: 3_547 - .saturating_add(Weight::from_ref_time(119_971 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (26_143_000 as Weight) + // Standard Error: 23_000 + .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -238,12 +223,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - // Minimum execution time: 49_459 nanoseconds. - Weight::from_ref_time(50_225_486 as u64) - // Standard Error: 3_160 - .saturating_add(Weight::from_ref_time(170_360 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (40_431_000 as Weight) + // Standard Error: 6_000 + .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -252,42 +236,38 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - // Minimum execution time: 48_712 nanoseconds. 
- Weight::from_ref_time(49_463_298 as u64) - // Standard Error: 2_678 - .saturating_add(Weight::from_ref_time(231_771 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (40_188_000 as Weight) + // Standard Error: 6_000 + .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - // Minimum execution time: 48_359 nanoseconds. - Weight::from_ref_time(48_767_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (38_031_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - // Minimum execution time: 43_369 nanoseconds. - Weight::from_ref_time(49_587_113 as u64) - // Standard Error: 1_008 - .saturating_add(Weight::from_ref_time(77_752 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (43_715_000 as Weight) + // Standard Error: 0 + .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - // Minimum execution time: 41_321 nanoseconds. 
- Weight::from_ref_time(50_803_289 as u64) - // Standard Error: 1_159 - .saturating_add(Weight::from_ref_time(57_239 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (47_882_000 as Weight) + // Standard Error: 1_000 + .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -295,22 +275,19 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - // Minimum execution time: 53_542 nanoseconds. - Weight::from_ref_time(54_481_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (45_600_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - // Minimum execution time: 41_825 nanoseconds. - Weight::from_ref_time(42_248_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (34_959_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - // Minimum execution time: 2_000_000_000 nanoseconds. 
- Weight::from_ref_time(2_000_000_000_000 as u64) + (2_000_000_000_000 as Weight) } // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) @@ -319,10 +296,9 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - // Minimum execution time: 62_600 nanoseconds. - Weight::from_ref_time(63_152_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (52_684_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) @@ -331,15 +307,14 @@ impl WeightInfo for () { // Storage: Balances Locks (r:5000 w:5000) // Storage: System Account (r:5000 w:5000) /// The range of component `v` is `[5000, 10000]`. - /// The range of component `d` is `[0, 5000]`. + /// The range of component `d` is `[1, 5000]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - // Minimum execution time: 297_149_264 nanoseconds. 
- Weight::from_ref_time(297_898_499_000 as u64) - // Standard Error: 263_819 - .saturating_add(Weight::from_ref_time(37_914_985 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) + (0 as Weight) + // Standard Error: 65_000 + .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) @@ -349,21 +324,19 @@ impl WeightInfo for () { // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:1 w:1) + // Storage: System Account (r:19 w:19) /// The range of component `c` is `[1, 1000]`. /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - // Minimum execution time: 22_034_317 nanoseconds. 
- Weight::from_ref_time(22_110_020_000 as u64) - // Standard Error: 235_528 - .saturating_add(Weight::from_ref_time(25_553_585 as u64).saturating_mul(v as u64)) - // Standard Error: 15_114 - .saturating_add(Weight::from_ref_time(1_032_330 as u64).saturating_mul(e as u64)) - .saturating_add(RocksDbWeight::get().reads(280 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(c as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(c as u64))) + (0 as Weight) + // Standard Error: 778_000 + .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(279 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) } } diff --git a/frame/examples/basic/Cargo.toml b/frame/examples/basic/Cargo.toml index e06bfa374cd9b..a5f0c7c89321a 100644 --- a/frame/examples/basic/Cargo.toml +++ b/frame/examples/basic/Cargo.toml @@ -31,7 +31,7 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../../primit default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index 13f069c23e27b..d7b933577ead5 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -26,7 +26,7 @@ use frame_system::RawOrigin; // To actually run this benchmark on pallet-example-basic, we need to put this pallet into 
the // runtime and compile it with `runtime-benchmarks` feature. The detail procedures are // documented at: -// https://docs.substrate.io/reference/how-to-guides/weights/add-benchmarks/ +// https://docs.substrate.io/v3/runtime/benchmarking#how-to-benchmark // // The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. // The exact command of how the estimate generated is printed at the top of the file. @@ -34,26 +34,25 @@ use frame_system::RawOrigin; // Details on using the benchmarks macro can be seen at: // https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks benchmarks! { - // This will measure the execution time of `set_dummy`. + // This will measure the execution time of `set_dummy` for b in [1..1000] range. set_dummy_benchmark { - // This is the benchmark setup phase. - // `set_dummy` is a constant time function, hence we hard-code some random value here. - let value = 1000u32.into(); - }: set_dummy(RawOrigin::Root, value) // The execution phase is just running `set_dummy` extrinsic call + // This is the benchmark setup phase + let b in 1 .. 1000; + }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call verify { // This is the optional benchmark verification phase, asserting certain states. - assert_eq!(Pallet::::dummy(), Some(value)) + assert_eq!(Pallet::::dummy(), Some(b.into())) } - // This will measure the execution time of `accumulate_dummy`. + // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same // as the extrinsic call. `_(...)` is used to represent the extrinsic name. // The benchmark verification phase is omitted. accumulate_dummy { - let value = 1000u32.into(); + let b in 1 .. 1000; // The caller account is whitelisted for DB reads/write by the benchmarking macro. 
let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), value) + }: _(RawOrigin::Signed(caller), b.into()) // This will measure the execution time of sorting a vector. sort_vector { @@ -64,7 +63,7 @@ benchmarks! { } }: { // The benchmark execution phase could also be a closure with custom code - m.sort(); + m.sort_unstable(); } // This line generates test cases for benchmarking, and could be run by: diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 256529421caae..f8acc1962388f 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -273,9 +273,9 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::{ClassifyDispatch, DispatchClass, DispatchResult, Pays, PaysFee, WeighData}, + dispatch::DispatchResult, traits::IsSubType, - weights::Weight, + weights::{ClassifyDispatch, DispatchClass, Pays, PaysFee, WeighData, Weight}, }; use frame_system::ensure_signed; use log::info; @@ -318,7 +318,7 @@ const MILLICENTS: u32 = 1_000_000_000; // - assigns a dispatch class `operational` if the argument of the call is more than 1000. // // More information can be read at: -// - https://docs.substrate.io/main-docs/build/tx-weights-fees/ +// - https://docs.substrate.io/v3/runtime/weights-and-fees // // Manually configuring weight is an advanced operation and what you really need may well be // fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. @@ -329,7 +329,7 @@ impl WeighData<(&BalanceOf,)> for WeightForSetDum let multiplier = self.0; // *target.0 is the amount passed into the extrinsic let cents = *target.0 / >::from(MILLICENTS); - Weight::from_ref_time((cents * multiplier).saturated_into::()) + (cents * multiplier).saturated_into::() } } @@ -370,7 +370,7 @@ pub mod pallet { type MagicNumber: Get; /// The overarching event type. 
- type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Type representing the weight of this pallet type WeightInfo: WeightInfo; @@ -392,7 +392,7 @@ pub mod pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. - Weight::zero() + 0 } // `on_finalize` is executed at the end of block after all extrinsic are dispatched. @@ -498,7 +498,7 @@ pub mod pallet { // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the // benchmark toolchain. #[pallet::weight( - ::WeightInfo::accumulate_dummy() + ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) )] pub fn accumulate_dummy(origin: OriginFor, increase_by: T::Balance) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. @@ -655,7 +655,7 @@ pub mod pallet { impl Pallet { // Add public immutables and private mutables. #[allow(dead_code)] - fn accumulate_foo(origin: T::RuntimeOrigin, increase_by: T::Balance) -> DispatchResult { + fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { let _sender = ensure_signed(origin)?; let prev = >::get(); @@ -719,11 +719,11 @@ impl sp_std::fmt::Debug for WatchDummy { impl SignedExtension for WatchDummy where - ::RuntimeCall: IsSubType>, + ::Call: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; type AccountId = T::AccountId; - type Call = ::RuntimeCall; + type Call = ::Call; type AdditionalSigned = (); type Pre = (); diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 97fbddfbc41e0..0f659e12fb443 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -19,10 +19,9 @@ use crate::*; use frame_support::{ - assert_ok, - dispatch::{DispatchInfo, GetDispatchInfo}, - parameter_types, + assert_ok, parameter_types, traits::{ConstU64, OnInitialize}, + weights::{DispatchInfo, 
GetDispatchInfo}, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures @@ -53,23 +52,23 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -88,7 +87,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -96,7 +95,7 @@ impl pallet_balances::Config for Test { impl Config for Test { type MagicNumber = ConstU64<1_000_000_000>; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); } @@ -128,12 +127,12 @@ fn it_works_for_optional_value() { assert_eq!(Example::dummy(), Some(val1)); // Check that accumulate works when we have Some value in Dummy already. - assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val2)); + assert_ok!(Example::accumulate_dummy(Origin::signed(1), val2)); assert_eq!(Example::dummy(), Some(val1 + val2)); // Check that accumulate works when we Dummy has None in it. 
>::on_initialize(2); - assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val1)); + assert_ok!(Example::accumulate_dummy(Origin::signed(1), val1)); assert_eq!(Example::dummy(), Some(val1 + val2 + val1)); }); } @@ -142,7 +141,7 @@ fn it_works_for_optional_value() { fn it_works_for_default_value() { new_test_ext().execute_with(|| { assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(RuntimeOrigin::signed(1), 1)); + assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); assert_eq!(Example::foo(), 25); }); } @@ -151,7 +150,7 @@ fn it_works_for_default_value() { fn set_dummy_works() { new_test_ext().execute_with(|| { let test_val = 133; - assert_ok!(Example::set_dummy(RuntimeOrigin::root(), test_val.into())); + assert_ok!(Example::set_dummy(Origin::root(), test_val.into())); assert_eq!(Example::dummy(), Some(test_val)); }); } @@ -191,13 +190,11 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - // TODO: account for proof size weight - assert!(info1.weight.ref_time() > 0); + assert!(info1.weight > 0); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); - // TODO: account for proof size weight - assert!(info1.weight.ref_time() > info2.weight.ref_time()); + assert!(info1.weight > info2.weight); } diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index a69f0824eac11..5fc6434e396eb 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,26 +17,34 @@ //! Autogenerated weights for pallet_example_basic //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `Shawns-MacBook-Pro.local`, CPU: `` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: // ./target/release/substrate // benchmark -// pallet -// --chain=dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_example_basic -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --output=./ +// --chain +// dev +// --execution +// wasm +// --wasm-execution +// compiled +// --pallet +// pallet_example_basic +// --extrinsic +// * +// --steps +// 100 +// --repeat +// 10 +// --raw +// --output +// ./ // --template // ./.maintain/frame-weight-template.hbs + #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -46,50 +54,48 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_example_basic. pub trait WeightInfo { - fn set_dummy_benchmark() -> Weight; - fn accumulate_dummy() -> Weight; + fn set_dummy_benchmark(b: u32, ) -> Weight; + fn accumulate_dummy(b: u32, ) -> Weight; fn sort_vector(x: u32, ) -> Weight; } /// Weights for pallet_example_basic using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: BasicExample Dummy (r:0 w:1) - fn set_dummy_benchmark() -> Weight { - Weight::from_ref_time(19_000_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn set_dummy_benchmark(b: u32, ) -> Weight { + (5_834_000 as Weight) + .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - // Storage: BasicExample Dummy (r:1 w:1) - fn accumulate_dummy() -> Weight { - Weight::from_ref_time(18_000_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn accumulate_dummy(b: u32, ) -> Weight { + (51_353_000 as Weight) + .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - /// The range of component `x` is `[0, 10000]`. fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 2 - .saturating_add(Weight::from_ref_time(520 as u64).saturating_mul(x as u64)) + (2_569_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { - // Storage: BasicExample Dummy (r:0 w:1) - fn set_dummy_benchmark() -> Weight { - Weight::from_ref_time(19_000_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn set_dummy_benchmark(b: u32, ) -> Weight { + (5_834_000 as Weight) + .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - // Storage: BasicExample Dummy (r:1 w:1) - fn accumulate_dummy() -> Weight { - Weight::from_ref_time(18_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn accumulate_dummy(b: u32, ) -> 
Weight { + (51_353_000 as Weight) + .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - /// The range of component `x` is `[0, 10000]`. fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 2 - .saturating_add(Weight::from_ref_time(520 as u64).saturating_mul(x as u64)) + (2_569_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) } } diff --git a/frame/examples/offchain-worker/Cargo.toml b/frame/examples/offchain-worker/Cargo.toml index bc5c0ac036021..e63b82757c030 100644 --- a/frame/examples/offchain-worker/Cargo.toml +++ b/frame/examples/offchain-worker/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -lite-json = { version = "0.2.0", default-features = false } +lite-json = { version = "0.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index fdf8b61a01acd..b40311051594b 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -126,7 +126,10 @@ pub mod pallet { type AuthorityId: AppCrypto; /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; + + /// The overarching dispatch call type. 
+ type Call: From>; // Configuration parameters diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index 72c001fd6e6cc..e5bd9fabc629b 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -53,15 +53,15 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -69,7 +69,7 @@ impl frame_system::Config for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -82,7 +82,7 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } -type Extrinsic = TestXt; +type Extrinsic = TestXt; type AccountId = <::Signer as IdentifyAccount>::AccountId; impl frame_system::offchain::SigningTypes for Test { @@ -92,22 +92,22 @@ impl frame_system::offchain::SigningTypes for Test { impl frame_system::offchain::SendTransactionTypes for Test where - RuntimeCall: From, + Call: From, { - type OverarchingCall = RuntimeCall; + type OverarchingCall = Call; type Extrinsic = Extrinsic; } impl frame_system::offchain::CreateSignedTransaction for Test where - RuntimeCall: From, + Call: From, { fn create_transaction>( - call: RuntimeCall, + call: Call, _public: ::Signer, _account: AccountId, nonce: u64, - ) -> Option<(RuntimeCall, 
::SignaturePayload)> { + ) -> Option<(Call, ::SignaturePayload)> { Some((call, (nonce, ()))) } } @@ -117,8 +117,9 @@ parameter_types! { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type AuthorityId = crypto::TestAuthId; + type Call = Call; type GracePeriod = ConstU64<5>; type UnsignedInterval = ConstU64<128>; type UnsignedPriority = UnsignedPriority; @@ -134,10 +135,10 @@ fn it_aggregates_the_price() { sp_io::TestExternalities::default().execute_with(|| { assert_eq!(Example::average_price(), None); - assert_ok!(Example::submit_price(RuntimeOrigin::signed(test_pub()), 27)); + assert_ok!(Example::submit_price(Origin::signed(test_pub()), 27)); assert_eq!(Example::average_price(), Some(27)); - assert_ok!(Example::submit_price(RuntimeOrigin::signed(test_pub()), 43)); + assert_ok!(Example::submit_price(Origin::signed(test_pub()), 43)); assert_eq!(Example::average_price(), Some(35)); }); } @@ -232,7 +233,7 @@ fn should_submit_signed_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); + assert_eq!(tx.call, Call::Example(crate::Call::submit_price { price: 15523 })); }); } @@ -252,9 +253,10 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { ) .unwrap(); - let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) + let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) - .unwrap(); + .unwrap() + .clone(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); @@ -277,7 +279,7 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let 
RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, }) = tx.call @@ -311,9 +313,10 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { ) .unwrap(); - let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) + let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) - .unwrap(); + .unwrap() + .clone(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); @@ -336,7 +339,7 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, }) = tx.call @@ -378,10 +381,7 @@ fn should_submit_raw_unsigned_transaction_on_chain() { assert_eq!(tx.signature, None); assert_eq!( tx.call, - RuntimeCall::Example(crate::Call::submit_price_unsigned { - block_number: 1, - price: 15523 - }) + Call::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 }) ); }); } diff --git a/frame/examples/parallel/Cargo.toml b/frame/examples/parallel/Cargo.toml new file mode 100644 index 0000000000000..f4d2c72a23a49 --- /dev/null +++ b/frame/examples/parallel/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "pallet-example-parallel" +version = "3.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Unlicense" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example pallet using runtime worker threads" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + 
+[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +sp-core = { version = "6.0.0", default-features = false, path = "../../../primitives/core" } +sp-io = { version = "6.0.0", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } +sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-tasks/std", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/examples/parallel/README.md b/frame/examples/parallel/README.md new file mode 100644 index 0000000000000..44b39a41507db --- /dev/null +++ b/frame/examples/parallel/README.md @@ -0,0 +1,7 @@ + +# Parallel Tasks Example Pallet + +This example pallet demonstrates parallelizing validation of the enlisted participants (see +`enlist_participants` dispatch). + +**This pallet serves as an example and is not meant to be used in production.** diff --git a/frame/examples/parallel/src/lib.rs b/frame/examples/parallel/src/lib.rs new file mode 100644 index 0000000000000..7b8948c2ebd09 --- /dev/null +++ b/frame/examples/parallel/src/lib.rs @@ -0,0 +1,151 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Parallel Tasks Example Pallet +//! +//! This example pallet demonstrates parallelizing validation of the enlisted participants +//! (see `enlist_participants` dispatch). +//! +//! **This pallet serves as an example and is not meant to be used in production.** + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_runtime::RuntimeDebug; + +use codec::{Decode, Encode}; +use sp_std::vec::Vec; + +#[cfg(test)] +mod tests; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching dispatch call type. + type Call: From>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::without_storage_info] + pub struct Pallet(_); + + /// A public part of the pallet. + #[pallet::call] + impl Pallet { + /// Get the new event running. + #[pallet::weight(0)] + pub fn run_event(origin: OriginFor, id: Vec) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + >::kill(); + >::mutate(move |event_id| *event_id = id); + Ok(().into()) + } + + /// Submit list of participants to the current event. + /// + /// The example utilizes parallel execution by checking half of the + /// signatures in spawned task. 
+ #[pallet::weight(0)] + pub fn enlist_participants( + origin: OriginFor, + participants: Vec, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + if validate_participants_parallel(&>::get(), &participants[..]) { + for participant in participants { + >::append(participant.account); + } + } + Ok(().into()) + } + } + + /// A vector of current participants + /// + /// To enlist someone to participate, signed payload should be + /// sent to `enlist`. + #[pallet::storage] + #[pallet::getter(fn participants)] + pub(super) type Participants = StorageValue<_, Vec>, ValueQuery>; + + /// Current event id to enlist participants to. + #[pallet::storage] + #[pallet::getter(fn get_current_event_id)] + pub(super) type CurrentEventId = StorageValue<_, Vec, ValueQuery>; +} + +/// Request to enlist participant. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, scale_info::TypeInfo)] +pub struct EnlistedParticipant { + pub account: Vec, + pub signature: Vec, +} + +impl EnlistedParticipant { + fn verify(&self, event_id: &[u8]) -> bool { + use sp_core::ByteArray; + use sp_runtime::traits::Verify; + + match sp_core::sr25519::Signature::try_from(&self.signature[..]) { + Ok(signature) => match sp_core::sr25519::Public::from_slice(self.account.as_ref()) { + Err(()) => false, + Ok(signer) => signature.verify(event_id, &signer), + }, + _ => false, + } + } +} + +fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParticipant]) -> bool { + fn spawn_verify(data: Vec) -> Vec { + let stream = &mut &data[..]; + let event_id = Vec::::decode(stream).expect("Failed to decode"); + let participants = Vec::::decode(stream).expect("Failed to decode"); + + for participant in participants { + if !participant.verify(&event_id) { + return false.encode() + } + } + true.encode() + } + + let mut async_payload = Vec::new(); + event_id.encode_to(&mut async_payload); + participants[..participants.len() / 2].encode_to(&mut async_payload); + + let handle = 
sp_tasks::spawn(spawn_verify, async_payload); + let mut result = true; + + for participant in &participants[participants.len() / 2 + 1..] { + if !participant.verify(event_id) { + result = false; + break + } + } + + bool::decode(&mut &handle.join()[..]).expect("Failed to decode result") && result +} diff --git a/frame/examples/parallel/src/tests.rs b/frame/examples/parallel/src/tests.rs new file mode 100644 index 0000000000000..67d823d8b204b --- /dev/null +++ b/frame/examples/parallel/src/tests.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{self as pallet_example_parallel, *}; + +use frame_support::parameter_types; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Example: pallet_example_parallel::{Pallet, Call, Storage}, + } +); + +parameter_types! 
{ + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Call = Call; + type PalletInfo = PalletInfo; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = sp_core::sr25519::Public; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = frame_support::traits::ConstU64<250>; + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl Config for Test { + type Call = Call; +} + +fn test_pub(n: u8) -> sp_core::sr25519::Public { + sp_core::sr25519::Public::from_raw([n; 32]) +} + +fn test_origin(n: u8) -> Origin { + Origin::signed(test_pub(n)) +} + +#[test] +fn it_can_enlist() { + use sp_core::Pair; + + sp_io::TestExternalities::default().execute_with(|| { + let (pair1, _) = sp_core::sr25519::Pair::generate(); + let (pair2, _) = sp_core::sr25519::Pair::generate(); + + let event_name = b"test"; + + Example::run_event(test_origin(1), event_name.to_vec()).expect("Failed to enlist"); + + let participants = vec![ + EnlistedParticipant { + account: pair1.public().to_vec(), + signature: AsRef::<[u8]>::as_ref(&pair1.sign(event_name)).to_vec(), + }, + EnlistedParticipant { + account: pair2.public().to_vec(), + signature: AsRef::<[u8]>::as_ref(&pair2.sign(event_name)).to_vec(), + }, + ]; + + Example::enlist_participants(Origin::signed(test_pub(1)), participants) + .expect("Failed to enlist"); + + assert_eq!(Example::participants().len(), 2); + }); +} + +#[test] +fn one_wrong_will_not_enlist_anyone() { + use sp_core::Pair; + + sp_io::TestExternalities::default().execute_with(|| { + let 
(pair1, _) = sp_core::sr25519::Pair::generate(); + let (pair2, _) = sp_core::sr25519::Pair::generate(); + let (pair3, _) = sp_core::sr25519::Pair::generate(); + + let event_name = b"test"; + + Example::run_event(test_origin(1), event_name.to_vec()).expect("Failed to enlist"); + + let participants = vec![ + EnlistedParticipant { + account: pair1.public().to_vec(), + signature: AsRef::<[u8]>::as_ref(&pair1.sign(event_name)).to_vec(), + }, + EnlistedParticipant { + account: pair2.public().to_vec(), + signature: AsRef::<[u8]>::as_ref(&pair2.sign(event_name)).to_vec(), + }, + // signing wrong event + EnlistedParticipant { + account: pair3.public().to_vec(), + signature: AsRef::<[u8]>::as_ref(&pair3.sign(&[])).to_vec(), + }, + ]; + + Example::enlist_participants(test_origin(1), participants).expect("Failed to enlist"); + + assert_eq!(Example::participants().len(), 0); + }); +} diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index f6f5175d63bb9..b67f3313e612b 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -19,7 +19,6 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../try-runtime", optional = true } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } @@ -27,7 +26,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives sp-tracing = { version = "5.0.0", default-features = false, path = "../../primitives/tracing" 
} [dev-dependencies] -array-bytes = "4.1" +hex-literal = "0.3.4" pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../transaction-payment" } sp-core = { version = "6.0.0", path = "../../primitives/core" } @@ -49,4 +48,4 @@ std = [ "sp-std/std", "sp-tracing/std", ] -try-runtime = ["frame-support/try-runtime", "frame-try-runtime/try-runtime" ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/executive/README.md b/frame/executive/README.md index c14c3912b082d..e96d07b0843f2 100644 --- a/frame/executive/README.md +++ b/frame/executive/README.md @@ -56,7 +56,7 @@ struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { // Do whatever you want. - frame_support::weights::Weight::zero() + 0 } } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index b7884efccf685..cd3e1c500db26 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -107,7 +107,7 @@ //! impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { //! fn on_runtime_upgrade() -> frame_support::weights::Weight { //! // Do whatever you want. -//! frame_support::weights::Weight::zero() +//! 0 //! } //! } //! 
@@ -118,17 +118,17 @@ use codec::{Codec, Encode}; use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, + dispatch::PostDispatchInfo, traits::{ EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, }, - weights::Weight, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, }; use sp_runtime::{ generic::Digest, traits::{ - self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, + self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, Saturating, ValidateUnsigned, Zero, }, transaction_validity::{TransactionSource, TransactionValidity}, @@ -138,7 +138,7 @@ use sp_std::{marker::PhantomData, prelude::*}; pub type CheckedOf = >::Checked; pub type CallOf = as Applyable>::Call; -pub type OriginOf = as Dispatchable>::RuntimeOrigin; +pub type OriginOf = as Dispatchable>::Origin; /// Main entry point for certain runtime actions as e.g. `execute_block`. /// @@ -202,7 +202,6 @@ where } } -#[cfg(feature = "try-runtime")] impl< System: frame_system::Config + EnsureInherentsAreFirst, Block: traits::Block

, @@ -212,8 +211,7 @@ impl< + OnInitialize + OnIdle + OnFinalize - + OffchainWorker - + frame_support::traits::TryState, + + OffchainWorker, COnRuntimeUpgrade: OnRuntimeUpgrade, > Executive where @@ -224,20 +222,16 @@ where OriginOf: From>, UnsignedValidator: ValidateUnsigned>, { - /// Execute given block, but don't as strict is the normal block execution. - /// - /// Some consensus related checks such as the state root check can be switched off via - /// `try_state_root`. Some additional non-consensus checks can be additionally enabled via - /// `try_state`. - /// - /// Should only be used for testing ONLY. - pub fn try_execute_block( - block: Block, - try_state_root: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Result { - use frame_support::traits::TryState; + /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. + pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() + } + /// Execute given block, but don't do any of the `final_checks`. + /// + /// Should only be used for testing. + #[cfg(feature = "try-runtime")] + pub fn execute_block_no_check(block: Block) -> frame_support::weights::Weight { Self::initialize_block(block.header()); Self::initial_checks(&block); @@ -245,17 +239,7 @@ where Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); - // run the try-state checks of all pallets. - >::try_state( - *header.number(), - select, - ) - .map_err(|e| { - frame_support::log::error!(target: "runtime::executive", "failure: {:?}", e); - e - })?; - - // do some of the checks that would normally happen in `final_checks`, but perhaps skip + // do some of the checks that would normally happen in `final_checks`, but definitely skip // the state root check. 
{ let new_header = >::finalize(); @@ -265,56 +249,26 @@ where assert!(header_item == computed_item, "Digest item must match that calculated."); } - if try_state_root { - let storage_root = new_header.state_root(); - header.state_root().check_equal(storage_root); - assert!( - header.state_root() == storage_root, - "Storage root must match that calculated." - ); - } - assert!( header.extrinsics_root() == new_header.extrinsics_root(), "Transaction trie root must be valid.", ); } - Ok(frame_system::Pallet::::block_weight().total()) + frame_system::Pallet::::block_weight().total() } /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. /// /// This should only be used for testing. + #[cfg(feature = "try-runtime")] pub fn try_runtime_upgrade() -> Result { + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::pre_upgrade().unwrap(); let weight = Self::execute_on_runtime_upgrade(); - Ok(weight) - } -} -impl< - System: frame_system::Config + EnsureInherentsAreFirst, - Block: traits::Block
, - Context: Default, - UnsignedValidator, - AllPalletsWithSystem: OnRuntimeUpgrade - + OnInitialize - + OnIdle - + OnFinalize - + OffchainWorker, - COnRuntimeUpgrade: OnRuntimeUpgrade, - > Executive -where - Block::Extrinsic: Checkable + Codec, - CheckedOf: Applyable + GetDispatchInfo, - CallOf: - Dispatchable, - OriginOf: From>, - UnsignedValidator: ValidateUnsigned>, -{ - /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. - pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::post_upgrade().unwrap(); + + Ok(weight) } /// Start the execution of a particular block. @@ -345,7 +299,7 @@ where // This means the format of all the event related storages must always be compatible. >::reset_events(); - let mut weight = Weight::zero(); + let mut weight = 0; if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } @@ -459,7 +413,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight.all_gt(Weight::zero()) { + if remaining_weight > 0 { let used_weight = >::on_idle( block_number, remaining_weight, @@ -594,6 +548,8 @@ where mod tests { use super::*; + use hex_literal::hex; + use sp_core::H256; use sp_runtime::{ generic::{DigestItem, Era}, @@ -637,12 +593,12 @@ mod tests { // one with block number arg and one without fn on_initialize(n: T::BlockNumber) -> Weight { println!("on_initialize({})", n); - Weight::from_ref_time(175) + 175 } fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { println!("on_idle{}, {})", n, remaining_weight); - Weight::from_ref_time(175) + 175 } fn on_finalize(n: T::BlockNumber) { @@ -651,7 +607,7 @@ mod tests { fn on_runtime_upgrade() -> Weight { sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 
Weight::from_ref_time(200) + 200 } fn offchain_worker(n: T::BlockNumber) { @@ -765,9 +721,9 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::builder() - .base_block(Weight::from_ref_time(10)) - .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_ref_time(5)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into()) + .base_block(10) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into()) .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, @@ -779,16 +735,16 @@ mod tests { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type BlockNumber = u64; type Hash = sp_core::H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = RuntimeVersion; type PalletInfo = PalletInfo; @@ -804,7 +760,7 @@ mod tests { type Balance = u64; impl pallet_balances::Config for Runtime { type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -818,7 +774,7 @@ mod tests { pub const TransactionByteFee: Balance = 0; } impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -830,12 +786,12 @@ mod tests { pub struct RuntimeVersion; impl frame_support::traits::Get for 
RuntimeVersion { fn get() -> sp_version::RuntimeVersion { - RuntimeVersionTestValues::get().clone() + RUNTIME_VERSION.with(|v| v.borrow().clone()) } } - parameter_types! { - pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = + thread_local! { + pub static RUNTIME_VERSION: std::cell::RefCell = Default::default(); } @@ -845,7 +801,7 @@ mod tests { frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); - type TestXt = sp_runtime::testing::TestXt; + type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; type TestUncheckedExtrinsic = TestXt; @@ -858,7 +814,7 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); - Weight::from_ref_time(100) + 100 } } @@ -884,8 +840,8 @@ mod tests { Some((who, extra(nonce, fee))) } - fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) + fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) } #[test] @@ -937,15 +893,11 @@ mod tests { fn block_import_works() { block_import_works_inner( new_test_ext_v0(1), - array_bytes::hex_n_into_unchecked( - "0d786e24c1f9e6ce237806a22c005bbbc7dee4edd6692b6c5442843d164392de", - ), + hex!("1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5").into(), ); block_import_works_inner( new_test_ext(1), - array_bytes::hex_n_into_unchecked( - "348485a4ab856467b440167e45f99b491385e8528e09b0e51f85f814a3021c93", - ), + hex!("75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5").into(), ); } fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { @@ -955,9 +907,10 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root, - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - 
), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), digest: Digest { logs: vec![] }, }, extrinsics: vec![], @@ -974,9 +927,10 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root: [0u8; 32].into(), - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), digest: Digest { logs: vec![] }, }, extrinsics: vec![], @@ -992,9 +946,10 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", - ), + state_root: hex!( + "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5" + ) + .into(), extrinsics_root: [0u8; 32].into(), digest: Digest { logs: vec![] }, }, @@ -1029,16 +984,16 @@ mod tests { let mut t = new_test_ext(10000); // given: TestXt uses the encoded len as fixed Len: let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; + let encoded_len = encoded.len() as Weight; // on_initialize weight + base block execution weight let block_weights = ::BlockWeights::get(); - let base_block_weight = Weight::from_ref_time(175) + block_weights.base_block; + let base_block_weight = 175 + block_weights.base_block; let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); + let num_to_exhaust_block = limit / (encoded_len + 5); t.execute_with(|| { Executive::initialize_block(&Header::new( 1, @@ -1052,7 +1007,7 @@ mod tests { for nonce in 0..=num_to_exhaust_block { let xt = TestXt::new( - 
RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, nonce.into(), 0), ); let res = Executive::apply_extrinsic(xt); @@ -1061,7 +1016,7 @@ mod tests { assert_eq!( >::block_weight().total(), //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_ref_time((encoded_len + 5) * (nonce + 1)) + base_block_weight, + (encoded_len + 5) * (nonce + 1) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -1077,23 +1032,23 @@ mod tests { #[test] fn block_weight_and_size_is_stored_per_tx() { let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let x1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 1, 0), ); let x2 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 2, 0), ); let len = xt.clone().encode().len() as u32; let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = Weight::from_ref_time(175) + - ::BlockWeights::get().base_block; + let base_block_weight = + 175 + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -1111,13 +1066,13 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_ref_time(len as u64) + + let extrinsic_weight = len as Weight + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; assert_eq!( >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, + base_block_weight + 3 * extrinsic_weight, ); assert_eq!(>::all_extrinsics_len(), 3 * len); @@ -1141,8 +1096,8 @@ mod tests { #[test] fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); + let valid = TestXt::new(Call::Custom(custom::Call::allowed_unsigned {}), None); + let invalid = TestXt::new(Call::Custom(custom::Call::unallowed_unsigned {}), None); let mut t = new_test_ext(1); t.execute_with(|| { @@ -1180,7 +1135,7 @@ mod tests { id, &1, 110, lock, ); let xt = TestXt::new( - RuntimeCall::System(SystemCall::remark { remark: vec![1u8] }), + Call::System(SystemCall::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), ); let weight = xt.get_dispatch_info().weight + @@ -1225,23 +1180,21 @@ mod tests { // NOTE: might need updates over time if new weights are introduced. // For now it only accounts for the base block execution weight and // the `on_initialize` weight defined in the custom test module. 
- assert_eq!( - >::block_weight().total(), - Weight::from_ref_time(175 + 175 + 10) - ); + assert_eq!(>::block_weight().total(), 175 + 175 + 10); }) } #[test] fn runtime_upgraded_should_work() { new_test_ext(1).execute_with(|| { - RuntimeVersionTestValues::mutate(|v| *v = Default::default()); + RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); // It should be added at genesis assert!(frame_system::LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); assert_eq!( @@ -1249,8 +1202,8 @@ mod tests { frame_system::LastRuntimeUpgrade::::get(), ); - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { spec_version: 1, spec_name: "test".into(), ..Default::default() @@ -1262,8 +1215,8 @@ mod tests { frame_system::LastRuntimeUpgrade::::get(), ); - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { spec_version: 1, spec_name: "test".into(), impl_version: 2, @@ -1311,8 +1264,9 @@ mod tests { fn custom_runtime_upgrade_is_called_before_modules() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); Executive::initialize_block(&Header::new( @@ -1332,8 +1286,9 @@ mod tests { fn event_from_runtime_upgrade_is_included() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. 
- RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); // set block number to non zero so events are not excluded @@ -1356,14 +1311,15 @@ mod tests { #[test] fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let header = new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); // Let's build some fake block. @@ -1381,14 +1337,15 @@ mod tests { }); // Reset to get the correct new genesis below. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } }); new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. 
- RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); >>::execute_block(Block::new(header, vec![xt])); @@ -1402,8 +1359,9 @@ mod tests { fn all_weights_are_recorded_correctly() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called for maximum complexity - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); let block_number = 1; @@ -1455,7 +1413,7 @@ mod tests { #[test] fn calculating_storage_root_twice_works() { - let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); + let call = Call::Custom(custom::Call::calculate_storage_root {}); let xt = TestXt::new(call, sign_extra(1, 0, 0)); let header = new_test_ext(1).execute_with(|| { @@ -1482,10 +1440,10 @@ mod tests { #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] fn invalid_inherent_position_fail() { let xt1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); + let xt2 = TestXt::new(Call::Custom(custom::Call::inherent_call {}), None); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. 
@@ -1510,7 +1468,7 @@ mod tests { #[test] fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); + let xt1 = TestXt::new(Call::Custom(custom::Call::inherent_call {}), None); let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); let header = new_test_ext(1).execute_with(|| { diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml deleted file mode 100644 index f14a5e7b9c20b..0000000000000 --- a/frame/fast-unstake/Cargo.toml +++ /dev/null @@ -1,66 +0,0 @@ -[package] -name = "pallet-fast-unstake" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Unlicense" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME fast unstake pallet" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } - -frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } - -sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } -sp-staking = { default-features = false, path = "../../primitives/staking" } -frame-election-provider-support = { default-features = false, path = "../election-provider-support" } - -frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } - -[dev-dependencies] -pallet-staking-reward-curve = { version = 
"4.0.0-dev", path = "../staking/reward-curve" } -sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } -substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } -sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } -pallet-staking = { path = "../staking" } -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } - - -[features] -default = ["std"] -std = [ - "codec/std", - "log/std", - "scale-info/std", - - "frame-support/std", - "frame-system/std", - - "sp-io/std", - "sp-staking/std", - "sp-runtime/std", - "sp-std/std", - - "frame-election-provider-support/std", - - "frame-benchmarking/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-staking/runtime-benchmarks", - "pallet-staking/runtime-benchmarks" -] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs deleted file mode 100644 index b4a5e21dcfc13..0000000000000 --- a/frame/fast-unstake/src/benchmarking.rs +++ /dev/null @@ -1,201 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Benchmarking for pallet-fast-unstake. 
- -#![cfg(feature = "runtime-benchmarks")] - -use crate::{types::*, Pallet as FastUnstake, *}; -use frame_benchmarking::{benchmarks, whitelist_account}; -use frame_support::{ - assert_ok, - traits::{Currency, EnsureOrigin, Get, Hooks}, -}; -use frame_system::RawOrigin; -use sp_runtime::traits::Zero; -use sp_staking::{EraIndex, StakingInterface}; -use sp_std::prelude::*; - -const USER_SEED: u32 = 0; -const DEFAULT_BACKER_PER_VALIDATOR: u32 = 128; -const MAX_VALIDATORS: u32 = 128; - -type CurrencyOf = ::Currency; - -fn create_unexposed_nominators() -> Vec { - (0..T::BatchSize::get()) - .map(|i| { - let account = - frame_benchmarking::account::("unexposed_nominator", i, USER_SEED); - fund_and_bond_account::(&account); - account - }) - .collect() -} - -fn fund_and_bond_account(account: &T::AccountId) { - let stake = CurrencyOf::::minimum_balance() * 100u32.into(); - CurrencyOf::::make_free_balance_be(&account, stake * 10u32.into()); - - // bond and nominate ourselves, this will guarantee that we are not backing anyone. - assert_ok!(T::Staking::bond(account, stake, account)); - assert_ok!(T::Staking::nominate(account, vec![account.clone()])); -} - -pub(crate) fn fast_unstake_events() -> Vec> { - frame_system::Pallet::::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| ::RuntimeEvent::from(e).try_into().ok()) - .collect::>() -} - -fn setup_staking(v: u32, until: EraIndex) { - let ed = CurrencyOf::::minimum_balance(); - - log!(debug, "registering {} validators and {} eras.", v, until); - - // our validators don't actually need to registered in staking -- just generate `v` random - // accounts. 
- let validators = (0..v) - .map(|x| frame_benchmarking::account::("validator", x, USER_SEED)) - .collect::>(); - - for era in 0..=until { - let others = (0..DEFAULT_BACKER_PER_VALIDATOR) - .map(|s| { - let who = frame_benchmarking::account::("nominator", era, s); - let value = ed; - (who, value) - }) - .collect::>(); - validators.iter().for_each(|v| { - T::Staking::add_era_stakers(&era, &v, others.clone()); - }); - } -} - -fn on_idle_full_block() { - let remaining_weight = ::BlockWeights::get().max_block; - FastUnstake::::on_idle(Zero::zero(), remaining_weight); -} - -benchmarks! { - // on_idle, we don't check anyone, but fully unbond them. - on_idle_unstake { - ErasToCheckPerBlock::::put(1); - for who in create_unexposed_nominators::() { - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(who.clone()).into(), - )); - } - - // run on_idle once. This will check era 0. - assert_eq!(Head::::get(), None); - on_idle_full_block::(); - - assert!(matches!( - Head::::get(), - Some(UnstakeRequest { - checked, - stashes, - .. - }) if checked.len() == 1 && stashes.len() as u32 == T::BatchSize::get() - )); - } - : { - on_idle_full_block::(); - } - verify { - assert!(matches!( - fast_unstake_events::().last(), - Some(Event::BatchFinished) - )); - } - - // on_idle, when we check some number of eras, - on_idle_check { - // number of eras multiplied by validators in that era. - let x in (T::Staking::bonding_duration() * 1) .. (T::Staking::bonding_duration() * MAX_VALIDATORS); - - let u = T::Staking::bonding_duration(); - let v = x / u; - - ErasToCheckPerBlock::::put(u); - T::Staking::set_current_era(u); - - // setup staking with v validators and u eras of data (0..=u) - setup_staking::(v, u); - - let stashes = create_unexposed_nominators::().into_iter().map(|s| { - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(s.clone()).into(), - )); - (s, T::Deposit::get()) - }).collect::>(); - - // no one is queued thus far. 
- assert_eq!(Head::::get(), None); - } - : { - on_idle_full_block::(); - } - verify { - let checked = (1..=u).rev().collect::>(); - let request = Head::::get().unwrap(); - assert_eq!(checked, request.checked.into_inner()); - assert!(matches!( - fast_unstake_events::().last(), - Some(Event::BatchChecked { .. }) - )); - assert!(stashes.iter().all(|(s, _)| request.stashes.iter().find(|(ss, _)| ss == s).is_some())); - } - - register_fast_unstake { - ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominators::().get(0).cloned().unwrap(); - whitelist_account!(who); - assert_eq!(Queue::::count(), 0); - - } - :_(RawOrigin::Signed(who.clone())) - verify { - assert_eq!(Queue::::count(), 1); - } - - deregister { - ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominators::().get(0).cloned().unwrap(); - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(who.clone()).into(), - )); - assert_eq!(Queue::::count(), 1); - whitelist_account!(who); - } - :_(RawOrigin::Signed(who.clone())) - verify { - assert_eq!(Queue::::count(), 0); - } - - control { - let origin = ::ControlOrigin::successful_origin(); - } - : _(origin, 128) - verify {} - - impl_benchmark_test_suite!(Pallet, crate::mock::ExtBuilder::default().build(), crate::mock::Runtime) -} diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs deleted file mode 100644 index c83054189feb7..0000000000000 --- a/frame/fast-unstake/src/lib.rs +++ /dev/null @@ -1,495 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A pallet that's designed to JUST do the following: -//! -//! If a nominator is not exposed in any `ErasStakers` (i.e. "has not actively backed any -//! validators in the last `BondingDuration` days"), then they can register themselves in this -//! pallet, unstake faster than having to wait an entire bonding duration. -//! -//! Appearing in the exposure of a validator means being exposed equal to that validator from the -//! point of view of the staking system. This usually means earning rewards with the validator, and -//! also being at the risk of slashing with the validator. This is equivalent to the "Active -//! Nominator" role explained in the -//! [February Staking Update](https://polkadot.network/blog/staking-update-february-2022/). -//! -//! This pallet works off the basis of `on_idle`, meaning that it provides no guarantee about when -//! it will succeed, if at all. Moreover, the queue implementation is unordered. In case of -//! congestion, no FIFO ordering is provided. -//! -//! Stakers who are certain about NOT being exposed can register themselves with -//! [`Call::register_fast_unstake`]. This will chill, and fully unbond the staker, and place them in -//! the queue to be checked. -//! -//! Once queued, but not being actively processed, stakers can withdraw their request via -//! [`Call::deregister`]. -//! -//! Once queued, a staker wishing to unbond can perform no further action in pallet-staking. This is -//! to prevent them from accidentally exposing themselves behind a validator etc. -//! -//! 
Once processed, if successful, no additional fee for the checking process is taken, and the -//! staker is instantly unbonded. -//! -//! If unsuccessful, meaning that the staker was exposed sometime in the last `BondingDuration` eras -//! they will end up being slashed for the amount of wasted work they have inflicted on the chian. - -#![cfg_attr(not(feature = "std"), no_std)] - -pub use pallet::*; - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod tests; - -// NOTE: enable benchmarking in tests as well. -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; -pub mod migrations; -pub mod types; -pub mod weights; - -pub const LOG_TARGET: &'static str = "runtime::fast-unstake"; - -// syntactic sugar for logging. -#[macro_export] -macro_rules! log { - ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { - log::$level!( - target: crate::LOG_TARGET, - concat!("[{:?}] 💨 ", $patter), >::block_number() $(, $values)* - ) - }; -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use crate::types::*; - use frame_support::{ - pallet_prelude::*, - traits::{Defensive, ReservableCurrency, StorageVersion}, - }; - use frame_system::pallet_prelude::*; - use sp_runtime::{ - traits::{Saturating, Zero}, - DispatchResult, - }; - use sp_staking::{EraIndex, StakingInterface}; - use sp_std::{prelude::*, vec::Vec}; - pub use weights::WeightInfo; - - #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] - #[codec(mel_bound(T: Config))] - #[scale_info(skip_type_params(T))] - pub struct MaxChecking(sp_std::marker::PhantomData); - impl frame_support::traits::Get for MaxChecking { - fn get() -> u32 { - T::Staking::bonding_duration() + 1 - } - } - - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. 
- type RuntimeEvent: From> - + IsType<::RuntimeEvent> - + TryInto>; - - /// The currency used for deposits. - type Currency: ReservableCurrency; - - /// Deposit to take for unstaking, to make sure we're able to slash the it in order to cover - /// the costs of resources on unsuccessful unstake. - type Deposit: Get>; - - /// The origin that can control this pallet. - type ControlOrigin: frame_support::traits::EnsureOrigin; - - /// Batch size. - /// - /// This many stashes are processed in each unstake request. - type BatchSize: Get; - - /// The access to staking functionality. - type Staking: StakingInterface, AccountId = Self::AccountId>; - - /// The weight information of this pallet. - type WeightInfo: WeightInfo; - } - - /// The current "head of the queue" being unstaked. - #[pallet::storage] - pub type Head = StorageValue<_, UnstakeRequest, OptionQuery>; - - /// The map of all accounts wishing to be unstaked. - /// - /// Keeps track of `AccountId` wishing to unstake and it's corresponding deposit. - #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, BalanceOf>; - - /// Number of eras to check per block. - /// - /// If set to 0, this pallet does absolutely nothing. - /// - /// Based on the amount of weight available at `on_idle`, up to this many eras of a single - /// nominator might be checked. - #[pallet::storage] - pub type ErasToCheckPerBlock = StorageValue<_, u32, ValueQuery>; - - /// The events of this pallet. - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// A staker was unstaked. - Unstaked { stash: T::AccountId, result: DispatchResult }, - /// A staker was slashed for requesting fast-unstake whilst being exposed. - Slashed { stash: T::AccountId, amount: BalanceOf }, - /// Some internal error happened while migrating stash. They are removed as head as a - /// consequence. - Errored { stash: T::AccountId }, - /// An internal error happened. 
Operations will be paused now. - InternalError, - /// A batch was partially checked for the given eras, but the process did not finish. - BatchChecked { eras: Vec }, - /// A batch was terminated. - /// - /// This is always follows by a number of `Unstaked` or `Slashed` events, marking the end - /// of the batch. A new batch will be created upon next block. - BatchFinished, - } - - #[pallet::error] - #[cfg_attr(test, derive(PartialEq))] - pub enum Error { - /// The provided Controller account was not found. - /// - /// This means that the given account is not bonded. - NotController, - /// The bonded account has already been queued. - AlreadyQueued, - /// The bonded account has active unlocking chunks. - NotFullyBonded, - /// The provided un-staker is not in the `Queue`. - NotQueued, - /// The provided un-staker is already in Head, and cannot deregister. - AlreadyHead, - /// The call is not allowed at this point because the pallet is not active. - CallNotAllowed, - } - - #[pallet::hooks] - impl Hooks for Pallet { - fn on_idle(_: T::BlockNumber, remaining_weight: Weight) -> Weight { - if remaining_weight.any_lt(T::DbWeight::get().reads(2)) { - return Weight::from_ref_time(0) - } - - Self::do_on_idle(remaining_weight) - } - } - - #[pallet::call] - impl Pallet { - /// Register oneself for fast-unstake. - /// - /// The dispatch origin of this call must be signed by the controller account, similar to - /// `staking::unbond`. - /// - /// The stash associated with the origin must have no ongoing unlocking chunks. If - /// successful, this will fully unbond and chill the stash. Then, it will enqueue the stash - /// to be checked in further blocks. - /// - /// If by the time this is called, the stash is actually eligible for fast-unstake, then - /// they are guaranteed to remain eligible, because the call will chill them as well. - /// - /// If the check works, the entire staking data is removed, i.e. the stash is fully - /// unstaked. 
- /// - /// If the check fails, the stash remains chilled and waiting for being unbonded as in with - /// the normal staking system, but they lose part of their unbonding chunks due to consuming - /// the chain's resources. - #[pallet::weight(::WeightInfo::register_fast_unstake())] - pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { - let ctrl = ensure_signed(origin)?; - - ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); - let stash_account = - T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::::NotController)?; - ensure!(!Queue::::contains_key(&stash_account), Error::::AlreadyQueued); - ensure!(!Self::is_head(&stash_account), Error::::AlreadyHead); - ensure!(!T::Staking::is_unbonding(&stash_account)?, Error::::NotFullyBonded); - - // chill and fully unstake. - T::Staking::chill(&stash_account)?; - T::Staking::fully_unbond(&stash_account)?; - - T::Currency::reserve(&stash_account, T::Deposit::get())?; - - // enqueue them. - Queue::::insert(stash_account, T::Deposit::get()); - Ok(()) - } - - /// Deregister oneself from the fast-unstake. - /// - /// This is useful if one is registered, they are still waiting, and they change their mind. - /// - /// Note that the associated stash is still fully unbonded and chilled as a consequence of - /// calling `register_fast_unstake`. This should probably be followed by a call to - /// `Staking::rebond`. 
- #[pallet::weight(::WeightInfo::deregister())] - pub fn deregister(origin: OriginFor) -> DispatchResult { - let ctrl = ensure_signed(origin)?; - - ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); - - let stash_account = - T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::::NotController)?; - ensure!(Queue::::contains_key(&stash_account), Error::::NotQueued); - ensure!(!Self::is_head(&stash_account), Error::::AlreadyHead); - let deposit = Queue::::take(stash_account.clone()); - - if let Some(deposit) = deposit.defensive() { - let remaining = T::Currency::unreserve(&stash_account, deposit); - if !remaining.is_zero() { - Self::halt("not enough balance to unreserve"); - } - } - - Ok(()) - } - - /// Control the operation of this pallet. - /// - /// Dispatch origin must be signed by the [`Config::ControlOrigin`]. - #[pallet::weight(::WeightInfo::control())] - pub fn control(origin: OriginFor, unchecked_eras_to_check: EraIndex) -> DispatchResult { - let _ = T::ControlOrigin::ensure_origin(origin)?; - ErasToCheckPerBlock::::put(unchecked_eras_to_check); - Ok(()) - } - } - - impl Pallet { - /// Returns `true` if `staker` is anywhere to be found in the `head`. - pub(crate) fn is_head(staker: &T::AccountId) -> bool { - Head::::get().map_or(false, |UnstakeRequest { stashes, .. }| { - stashes.iter().any(|(stash, _)| stash == staker) - }) - } - - /// Halt the operations of this pallet. - pub(crate) fn halt(reason: &'static str) { - frame_support::defensive!(reason); - ErasToCheckPerBlock::::put(0); - Self::deposit_event(Event::::InternalError) - } - - /// process up to `remaining_weight`. - /// - /// Returns the actual weight consumed. - /// - /// Written for readability in mind, not efficiency. For example: - /// - /// 1. We assume this is only ever called once per `on_idle`. This is because we know that - /// in all use cases, even a single nominator cannot be unbonded in a single call. Multiple - /// calls to this function are thus not needed. - /// - /// 2. 
We will only mark a staker as unstaked if at the beginning of a check cycle, they are - /// found out to have no eras to check. At the end of a check cycle, even if they are fully - /// checked, we don't finish the process. - pub(crate) fn do_on_idle(remaining_weight: Weight) -> Weight { - let mut eras_to_check_per_block = ErasToCheckPerBlock::::get(); - if eras_to_check_per_block.is_zero() { - return T::DbWeight::get().reads(1) - } - - // NOTE: here we're assuming that the number of validators has only ever increased, - // meaning that the number of exposures to check is either this per era, or less. - let validator_count = T::Staking::desired_validator_count(); - - // determine the number of eras to check. This is based on both `ErasToCheckPerBlock` - // and `remaining_weight` passed on to us from the runtime executive. - let max_weight = |v, u| { - ::WeightInfo::on_idle_check(v * u) - .max(::WeightInfo::on_idle_unstake()) - }; - while max_weight(validator_count, eras_to_check_per_block).any_gt(remaining_weight) { - eras_to_check_per_block.saturating_dec(); - if eras_to_check_per_block.is_zero() { - log!(debug, "early existing because eras_to_check_per_block is zero"); - return T::DbWeight::get().reads(2) - } - } - - if T::Staking::election_ongoing() { - // NOTE: we assume `ongoing` does not consume any weight. - // there is an ongoing election -- we better not do anything. Imagine someone is not - // exposed anywhere in the last era, and the snapshot for the election is already - // taken. In this time period, we don't want to accidentally unstake them. - return T::DbWeight::get().reads(2) - } - - let UnstakeRequest { stashes, mut checked } = match Head::::take().or_else(|| { - // NOTE: there is no order guarantees in `Queue`. 
- let stashes: BoundedVec<_, T::BatchSize> = Queue::::drain() - .take(T::BatchSize::get() as usize) - .collect::>() - .try_into() - .expect("take ensures bound is met; qed"); - if stashes.is_empty() { - None - } else { - Some(UnstakeRequest { stashes, checked: Default::default() }) - } - }) { - None => { - // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4) - }, - Some(head) => head, - }; - - log!( - debug, - "checking {:?} stashes, eras_to_check_per_block = {:?}, remaining_weight = {:?}", - stashes.len(), - eras_to_check_per_block, - remaining_weight - ); - - // the range that we're allowed to check in this round. - let current_era = T::Staking::current_era(); - let bonding_duration = T::Staking::bonding_duration(); - - // prune all the old eras that we don't care about. This will help us keep the bound - // of `checked`. - checked.retain(|e| *e >= current_era.saturating_sub(bonding_duration)); - - let unchecked_eras_to_check = { - // get the last available `bonding_duration` eras up to current era in reverse - // order. - let total_check_range = (current_era.saturating_sub(bonding_duration)..= - current_era) - .rev() - .collect::>(); - debug_assert!( - total_check_range.len() <= (bonding_duration + 1) as usize, - "{:?}", - total_check_range - ); - - // remove eras that have already been checked, take a maximum of - // eras_to_check_per_block. 
- total_check_range - .into_iter() - .filter(|e| !checked.contains(e)) - .take(eras_to_check_per_block as usize) - .collect::>() - }; - - log!( - debug, - "{} eras to check: {:?}", - unchecked_eras_to_check.len(), - unchecked_eras_to_check - ); - - let unstake_stash = |stash: T::AccountId, deposit| { - let result = T::Staking::force_unstake(stash.clone()); - let remaining = T::Currency::unreserve(&stash, deposit); - if !remaining.is_zero() { - Self::halt("not enough balance to unreserve"); - } else { - log!(info, "unstaked {:?}, outcome: {:?}", stash, result); - Self::deposit_event(Event::::Unstaked { stash, result }); - } - }; - - let check_stash = |stash, deposit, eras_checked: &mut u32| { - let is_exposed = unchecked_eras_to_check.iter().any(|e| { - eras_checked.saturating_inc(); - T::Staking::is_exposed_in_era(&stash, e) - }); - - if is_exposed { - T::Currency::slash_reserved(&stash, deposit); - log!(info, "slashed {:?} by {:?}", stash, deposit); - Self::deposit_event(Event::::Slashed { stash, amount: deposit }); - false - } else { - true - } - }; - - if unchecked_eras_to_check.is_empty() { - // `stash` is not exposed in any era now -- we can let go of them now. - stashes.into_iter().for_each(|(stash, deposit)| unstake_stash(stash, deposit)); - Self::deposit_event(Event::::BatchFinished); - ::WeightInfo::on_idle_unstake() - } else { - // eras checked so far. 
- let mut eras_checked = 0u32; - - let pre_length = stashes.len(); - let stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize> = stashes - .into_iter() - .filter(|(stash, deposit)| { - check_stash(stash.clone(), *deposit, &mut eras_checked) - }) - .collect::>() - .try_into() - .expect("filter can only lessen the length; still in bound; qed"); - let post_length = stashes.len(); - - log!( - debug, - "checked {:?} eras, pre stashes: {:?}, post: {:?}", - eras_checked, - pre_length, - post_length, - ); - - match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { - Ok(_) => - if stashes.is_empty() { - Self::deposit_event(Event::::BatchFinished); - } else { - Head::::put(UnstakeRequest { stashes, checked }); - Self::deposit_event(Event::::BatchChecked { - eras: unchecked_eras_to_check, - }); - }, - Err(_) => { - // don't put the head back in -- there is an internal error in the pallet. - Self::halt("checked is pruned via retain above") - }, - } - - ::WeightInfo::on_idle_check(validator_count * eras_checked) - } - } - } -} diff --git a/frame/fast-unstake/src/migrations.rs b/frame/fast-unstake/src/migrations.rs deleted file mode 100644 index 10d8e54134785..0000000000000 --- a/frame/fast-unstake/src/migrations.rs +++ /dev/null @@ -1,77 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub mod v1 { - use crate::{types::BalanceOf, *}; - use frame_support::{ - storage::unhashed, - traits::{Defensive, Get, GetStorageVersion, OnRuntimeUpgrade}, - weights::Weight, - }; - use sp_staking::EraIndex; - use sp_std::prelude::*; - - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { - fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - - log!( - info, - "Running migration with current storage version {:?} / onchain {:?}", - current, - onchain - ); - - if current == 1 && onchain == 0 { - // if a head exists, then we put them back into the queue. - if Head::::exists() { - if let Some((stash, _, deposit)) = - unhashed::take::<(T::AccountId, Vec, BalanceOf)>( - &Head::::hashed_key(), - ) - .defensive() - { - Queue::::insert(stash, deposit); - current.put::>(); - } else { - // not much we can do here -- head is already deleted. - } - T::DbWeight::get().reads_writes(2, 3) - } else { - T::DbWeight::get().reads(2) - } - } else { - log!(info, "Migration did not execute. This probably should be removed"); - T::DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(Pallet::::on_chain_storage_version(), 0); - Ok(Default::default()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { - assert_eq!(Pallet::::on_chain_storage_version(), 1); - Ok(()) - } - } -} diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs deleted file mode 100644 index d66f4ba5663d9..0000000000000 --- a/frame/fast-unstake/src/mock.rs +++ /dev/null @@ -1,379 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{self as fast_unstake}; -use frame_benchmarking::frame_support::assert_ok; -use frame_support::{ - pallet_prelude::*, - parameter_types, - traits::{ConstU64, Currency}, - weights::constants::WEIGHT_PER_SECOND, -}; -use sp_runtime::traits::{Convert, IdentityLookup}; - -use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; -use sp_std::prelude::*; - -pub type AccountId = u128; -pub type AccountIndex = u32; -pub type BlockNumber = u64; -pub type Balance = u128; -pub type T = Runtime; - -parameter_types! 
{ - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), - ); -} - -impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = ConstU64<5>; - type WeightInfo = (); -} - -parameter_types! { - pub static ExistentialDeposit: Balance = 1; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); -} - -pallet_staking_reward_curve::build! { - const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); -} - -parameter_types! 
{ - pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; - pub static BondingDuration: u32 = 3; - pub static CurrentEra: u32 = 0; - pub static Ongoing: bool = false; - pub static MaxWinners: u32 = 100; -} - -pub struct MockElection; -impl frame_election_provider_support::ElectionProviderBase for MockElection { - type AccountId = AccountId; - type BlockNumber = BlockNumber; - type MaxWinners = MaxWinners; - type DataProvider = Staking; - type Error = (); -} - -impl frame_election_provider_support::ElectionProvider for MockElection { - fn ongoing() -> bool { - Ongoing::get() - } - fn elect() -> Result, Self::Error> { - Err(()) - } -} - -impl pallet_staking::Config for Runtime { - type MaxNominations = ConstU32<16>; - type Currency = Balances; - type CurrencyBalance = Balance; - type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; - type BondingDuration = BondingDuration; - type SessionInterface = (); - type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxNominatorRewardedPerValidator = ConstU32<64>; - type OffendingValidatorsThreshold = (); - type ElectionProvider = MockElection; - type GenesisElectionProvider = Self::ElectionProvider; - type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type TargetList = pallet_staking::UseValidatorsMap; - type MaxUnlockingChunks = ConstU32<32>; - type OnStakerSlash = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); -} - -pub struct BalanceToU256; -impl Convert for BalanceToU256 { - fn convert(n: Balance) -> sp_core::U256 { - n.into() - } -} - -pub struct U256ToBalance; -impl Convert for 
U256ToBalance { - fn convert(n: sp_core::U256) -> Balance { - n.try_into().unwrap() - } -} - -parameter_types! { - pub static Deposit: u128 = 7; - pub static BatchSize: u32 = 1; -} - -impl fast_unstake::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Deposit = Deposit; - type Currency = Balances; - type Staking = Staking; - type ControlOrigin = frame_system::EnsureRoot; - type BatchSize = BatchSize; - type WeightInfo = (); -} - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -frame_support::construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: frame_system, - Timestamp: pallet_timestamp, - Balances: pallet_balances, - Staking: pallet_staking, - FastUnstake: fast_unstake, - } -); - -parameter_types! { - static FastUnstakeEvents: u32 = 0; -} - -pub(crate) fn fast_unstake_events_since_last_call() -> Vec> { - let events = System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::FastUnstake(inner) = e { Some(inner) } else { None }) - .collect::>(); - let already_seen = FastUnstakeEvents::get(); - FastUnstakeEvents::set(events.len() as u32); - events.into_iter().skip(already_seen as usize).collect() -} - -pub struct ExtBuilder { - unexposed: Vec<(AccountId, AccountId, Balance)>, -} - -impl Default for ExtBuilder { - fn default() -> Self { - Self { - unexposed: vec![ - (1, 2, 7 + 100), - (3, 4, 7 + 100), - (5, 6, 7 + 100), - (7, 8, 7 + 100), - (9, 10, 7 + 100), - ], - } - } -} - -pub(crate) const VALIDATORS_PER_ERA: AccountId = 32; -pub(crate) const VALIDATOR_PREFIX: AccountId = 100; -pub(crate) const NOMINATORS_PER_VALIDATOR_PER_ERA: AccountId = 4; -pub(crate) const NOMINATOR_PREFIX: AccountId = 1000; - -impl ExtBuilder { - pub(crate) fn register_stakers_for_era(era: u32) { - // validators are prefixed with 100 and nominators with 1000 to prevent conflict. 
Make sure - // all the other accounts used in tests are below 100. Also ensure here that we don't - // overlap. - assert!(VALIDATOR_PREFIX + VALIDATORS_PER_ERA < NOMINATOR_PREFIX); - - (VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA) - .map(|v| { - // for the sake of sanity, let's register this taker as an actual validator. - let others = (NOMINATOR_PREFIX.. - (NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA)) - .map(|n| IndividualExposure { who: n, value: 0 as Balance }) - .collect::>(); - (v, Exposure { total: 0, own: 0, others }) - }) - .for_each(|(validator, exposure)| { - pallet_staking::ErasStakers::::insert(era, validator, exposure); - }); - } - - pub(crate) fn batch(self, size: u32) -> Self { - BatchSize::set(size); - self - } - - pub(crate) fn build(self) -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); - let mut storage = - frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let validators_range = VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA; - let nominators_range = - NOMINATOR_PREFIX..NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA; - - let _ = pallet_balances::GenesisConfig:: { - balances: self - .unexposed - .clone() - .into_iter() - .map(|(stash, _, balance)| (stash, balance * 2)) - .chain( - self.unexposed - .clone() - .into_iter() - .map(|(_, ctrl, balance)| (ctrl, balance * 2)), - ) - .chain(validators_range.clone().map(|x| (x, 7 + 100))) - .chain(nominators_range.clone().map(|x| (x, 7 + 100))) - .collect::>(), - } - .assimilate_storage(&mut storage); - - let _ = pallet_staking::GenesisConfig:: { - stakers: self - .unexposed - .into_iter() - .map(|(x, y, z)| (x, y, z, pallet_staking::StakerStatus::Nominator(vec![42]))) - .chain(validators_range.map(|x| (x, x, 100, StakerStatus::Validator))) - .chain(nominators_range.map(|x| (x, x, 100, StakerStatus::Nominator(vec![x])))) - .collect::>(), - ..Default::default() - } - .assimilate_storage(&mut storage); - - let mut ext = 
sp_io::TestExternalities::from(storage); - - ext.execute_with(|| { - // for events to be deposited. - frame_system::Pallet::::set_block_number(1); - - for era in 0..=(BondingDuration::get()) { - Self::register_stakers_for_era(era); - } - - // because we read this value as a measure of how many validators we have. - pallet_staking::ValidatorCount::::put(VALIDATORS_PER_ERA as u32); - }); - - ext - } - - pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - self.build().execute_with(|| { - test(); - }) - } -} - -pub(crate) fn run_to_block(n: u64, on_idle: bool) { - let current_block = System::block_number(); - assert!(n > current_block); - while System::block_number() < n { - Balances::on_finalize(System::block_number()); - Staking::on_finalize(System::block_number()); - FastUnstake::on_finalize(System::block_number()); - - System::set_block_number(System::block_number() + 1); - - Balances::on_initialize(System::block_number()); - Staking::on_initialize(System::block_number()); - FastUnstake::on_initialize(System::block_number()); - if on_idle { - FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block); - } - } -} - -pub(crate) fn next_block(on_idle: bool) { - let current = System::block_number(); - run_to_block(current + 1, on_idle); -} - -pub fn assert_unstaked(stash: &AccountId) { - assert!(!pallet_staking::Bonded::::contains_key(stash)); - assert!(!pallet_staking::Payee::::contains_key(stash)); - assert!(!pallet_staking::Validators::::contains_key(stash)); - assert!(!pallet_staking::Nominators::::contains_key(stash)); -} - -pub fn create_exposed_nominator(exposed: AccountId, era: u32) { - // create an exposed nominator in era 1 - pallet_staking::ErasStakers::::mutate(era, VALIDATORS_PER_ERA, |expo| { - expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); - }); - Balances::make_free_balance_be(&exposed, 100); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(exposed), - exposed, - 10, - 
pallet_staking::RewardDestination::Staked - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); - // register the exposed one. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); -} diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs deleted file mode 100644 index 522aa1d0fac28..0000000000000 --- a/frame/fast-unstake/src/tests.rs +++ /dev/null @@ -1,1200 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Tests for pallet-fast-unstake. - -use super::*; -use crate::{mock::*, types::*, weights::WeightInfo, Event}; -use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; -use pallet_staking::{CurrentEra, RewardDestination}; - -use sp_runtime::traits::BadOrigin; -use sp_staking::StakingInterface; - -#[test] -fn test_setup_works() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::bonding_duration(), 3); - }); -} - -#[test] -fn register_works() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - // Ensure stash is in the queue. 
- assert_ne!(Queue::::get(1), None); - }); -} - -#[test] -fn register_insufficient_funds_fails() { - use pallet_balances::Error as BalancesError; - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - ::Currency::make_free_balance_be(&1, 3); - - // Controller account registers for fast unstake. - assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), - BalancesError::::InsufficientBalance, - ); - - // Ensure stash is in the queue. - assert_eq!(Queue::::get(1), None); - }); -} - -#[test] -fn register_disabled_fails() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), - Error::::CallNotAllowed - ); - }); -} - -#[test] -fn cannot_register_if_not_bonded() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Mint accounts 1 and 2 with 200 tokens. - for _ in 1..2 { - let _ = Balances::make_free_balance_be(&1, 200); - } - // Attempt to fast unstake. 
- assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)), - Error::::NotController - ); - }); -} - -#[test] -fn cannot_register_if_in_queue() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Insert some Queue item - Queue::::insert(1, 10); - // Cannot re-register, already in queue - assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), - Error::::AlreadyQueued - ); - }); -} - -#[test] -fn cannot_register_if_head() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Insert some Head item for stash - Head::::put(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![], - }); - // Controller attempts to regsiter - assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), - Error::::AlreadyHead - ); - }); -} - -#[test] -fn cannot_register_if_has_unlocking_chunks() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Start unbonding half of staked tokens - assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); - // Cannot register for fast unstake with unlock chunks active - assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), - Error::::NotFullyBonded - ); - }); -} - -#[test] -fn deregister_works() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - - assert_eq!(::Currency::reserved_balance(&1), 0); - - // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(::Currency::reserved_balance(&1), Deposit::get()); - - // Controller then changes mind and deregisters. - assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); - assert_eq!(::Currency::reserved_balance(&1), 0); - - // Ensure stash no longer exists in the queue. 
- assert_eq!(Queue::::get(1), None); - }); -} - -#[test] -fn deregister_disabled_fails() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - ErasToCheckPerBlock::::put(0); - assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::CallNotAllowed); - }); -} - -#[test] -fn cannot_deregister_if_not_controller() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - // Stash tries to deregister. - assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(1)), Error::::NotController); - }); -} - -#[test] -fn cannot_deregister_if_not_queued() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Controller tries to deregister without first registering - assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::NotQueued); - }); -} - -#[test] -fn cannot_deregister_already_head() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - // Controller attempts to register, should fail - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - // Insert some Head item for stash. - Head::::put(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![], - }); - // Controller attempts to deregister - assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); - }); -} - -#[test] -fn control_works() { - ExtBuilder::default().build_and_execute(|| { - // account with control (root) origin wants to only check 1 era per block. 
- assert_ok!(FastUnstake::control(RuntimeOrigin::root(), 1_u32)); - }); -} - -#[test] -fn control_must_be_control_origin() { - ExtBuilder::default().build_and_execute(|| { - // account without control (root) origin wants to only check 1 era per block. - assert_noop!(FastUnstake::control(RuntimeOrigin::signed(1), 1_u32), BadOrigin); - }); -} - -mod on_idle { - use super::*; - - #[test] - fn early_exit() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // set up Queue item - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - - // call on_idle with no remaining weight - FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); - - // assert nothing changed in Queue and Head - assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - }); - } - - #[test] - fn respects_weight() { - ExtBuilder::default().build_and_execute(|| { - // we want to check all eras in one block... - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - - assert_eq!(Queue::::count(), 1); - assert_eq!(Head::::get(), None); - - // when: call fast unstake with not enough weight to process the whole thing, just one - // era. - let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 1, - ); - assert_eq!(FastUnstake::on_idle(0, remaining_weight), remaining_weight); - - // then - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::BatchChecked { eras: vec![3] }] - ); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3] - }) - ); - - // when: another 1 era. 
- let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 1, - ); - assert_eq!(FastUnstake::on_idle(0, remaining_weight), remaining_weight); - - // then: - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::BatchChecked { eras: bounded_vec![2] }] - ); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - // when: then 5 eras, we only need 2 more. - let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 5, - ); - assert_eq!( - FastUnstake::on_idle(0, remaining_weight), - // note the amount of weight consumed: 2 eras worth of weight. - ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 2, - ) - ); - - // then: - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::BatchChecked { eras: vec![1, 0] }] - ); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when: not enough weight to unstake: - let remaining_weight = - ::WeightInfo::on_idle_unstake() - Weight::from_ref_time(1); - assert_eq!(FastUnstake::on_idle(0, remaining_weight), Weight::from_ref_time(0)); - - // then nothing happens: - assert_eq!(fast_unstake_events_since_last_call(), vec![]); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when: enough weight to get over at least one iteration: then we are unblocked and can - // unstake. 
- let remaining_weight = ::WeightInfo::on_idle_check( - pallet_staking::ValidatorCount::::get() * 1, - ); - assert_eq!( - FastUnstake::on_idle(0, remaining_weight), - ::WeightInfo::on_idle_unstake() - ); - - // then we finish the unbonding: - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::Unstaked { stash: 1, result: Ok(()) }, Event::BatchFinished], - ); - assert_eq!(Head::::get(), None,); - - assert_unstaked(&1); - }); - } - - #[test] - fn if_head_not_set_one_random_fetched_from_queue() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // given - assert_eq!(::Currency::reserved_balance(&1), 0); - - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); - - assert_eq!(::Currency::reserved_balance(&1), Deposit::get()); - - assert_eq!(Queue::::count(), 5); - assert_eq!(Head::::get(), None); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - assert_eq!(Queue::::count(), 4); - - // when - next_block(true); - - // then - assert_eq!(Head::::get(), None,); - assert_eq!(Queue::::count(), 4); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(5, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }), - ); - assert_eq!(Queue::::count(), 3); - - assert_eq!(::Currency::reserved_balance(&1), 0); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - 
Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished, - Event::BatchChecked { eras: vec![3, 2, 1, 0] } - ] - ); - }); - } - - #[test] - fn successful_multi_queue() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // register multi accounts for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_eq!(Queue::::get(3), Some(Deposit::get())); - - // assert 2 queue items are in Queue & None in Head to start with - assert_eq!(Queue::::count(), 2); - assert_eq!(Head::::get(), None); - - // process on idle and check eras for next Queue item - next_block(true); - - // process on idle & let go of current Head - next_block(true); - - // confirm Head / Queue items remaining - assert_eq!(Queue::::count(), 1); - assert_eq!(Head::::get(), None); - - // process on idle and check eras for next Queue item - next_block(true); - - // process on idle & let go of current Head - next_block(true); - - // Head & Queue should now be empty - assert_eq!(Head::::get(), None); - assert_eq!(Queue::::count(), 0); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished, - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 3, result: Ok(()) }, - Event::BatchFinished, - ] - ); - - assert_unstaked(&1); - assert_unstaked(&3); - }); - } - - #[test] - fn successful_unstake() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), 
Some(Deposit::get())); - - // process on idle - next_block(true); - - // assert queue item has been moved to head - assert_eq!(Queue::::get(1), None); - - // assert head item present - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - next_block(true); - assert_eq!(Head::::get(), None,); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn successful_unstake_all_eras_per_block() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - Balances::make_free_balance_be(&2, 100); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - - // process on idle - next_block(true); - - // assert queue item has been moved to head - assert_eq!(Queue::::get(1), None); - - // assert head item present - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - next_block(true); - assert_eq!(Head::::get(), None,); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn successful_unstake_one_era_per_block() { - ExtBuilder::default().build_and_execute(|| { - // put 1 era per block - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - - 
// process on idle - next_block(true); - - // assert queue item has been moved to head - assert_eq!(Queue::::get(1), None); - - // assert head item present - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3] - }) - ); - - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1] - }) - ); - - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - next_block(true); - - assert_eq!(Head::::get(), None,); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3] }, - Event::BatchChecked { eras: vec![2] }, - Event::BatchChecked { eras: vec![1] }, - Event::BatchChecked { eras: vec![0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn old_checked_era_pruned() { - // the only scenario where checked era pruning (checked.retain) comes handy is a follows: - // the whole vector is full and at capacity and in the next call we are ready to unstake, - // but then a new era happens. 
- ExtBuilder::default().build_and_execute(|| { - // given - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(Deposit::get())); - - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3] - }) - ); - - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1] - }) - ); - - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when: a new era happens right before one is free. - CurrentEra::::put(CurrentEra::::get().unwrap() + 1); - ExtBuilder::register_stakers_for_era(CurrentEra::::get().unwrap()); - - // then - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - // note era 0 is pruned to keep the vector length sane. 
- checked: bounded_vec![3, 2, 1, 4], - }) - ); - - next_block(true); - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3] }, - Event::BatchChecked { eras: vec![2] }, - Event::BatchChecked { eras: vec![1] }, - Event::BatchChecked { eras: vec![0] }, - Event::BatchChecked { eras: vec![4] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn unstake_paused_mid_election() { - ExtBuilder::default().build_and_execute(|| { - // give: put 1 era per block - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - // process 2 blocks - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3] - }) - ); - - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - // when - Ongoing::set(true); - - // then nothing changes - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - - // then we register a new era. - Ongoing::set(false); - CurrentEra::::put(CurrentEra::::get().unwrap() + 1); - ExtBuilder::register_stakers_for_era(CurrentEra::::get().unwrap()); - - // then we can progress again, but notice that the new era that had to be checked. 
- next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 4] - }) - ); - - // progress to end - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get())], - checked: bounded_vec![3, 2, 4, 1] - }) - ); - - // but notice that we don't care about era 0 instead anymore! we're done. - next_block(true); - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3] }, - Event::BatchChecked { eras: vec![2] }, - Event::BatchChecked { eras: vec![4] }, - Event::BatchChecked { eras: vec![1] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::BatchFinished - ] - ); - - assert_unstaked(&1); - }); - } - - #[test] - fn exposed_nominator_cannot_unstake() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - - // create an exposed nominator in era 1 - let exposed = 666; - create_exposed_nominator(exposed, 1); - - // a few blocks later, we realize they are slashed - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(exposed, Deposit::get())], - checked: bounded_vec![3] - }) - ); - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(exposed, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - next_block(true); - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3] }, - Event::BatchChecked { eras: vec![2] }, - Event::Slashed { stash: exposed, amount: Deposit::get() }, - Event::BatchFinished - ] - ); - }); - } - - #[test] - fn exposed_nominator_cannot_unstake_multi_check() { - ExtBuilder::default().build_and_execute(|| { - // same as the previous check, but we check 2 eras per block, and we make the 
exposed be - // exposed in era 0, so that it is detected halfway in a check era. - ErasToCheckPerBlock::::put(2); - CurrentEra::::put(BondingDuration::get()); - - // create an exposed nominator in era 0 - let exposed = 666; - create_exposed_nominator(exposed, 0); - - // a few blocks later, we realize they are slashed - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(exposed, Deposit::get())], - checked: bounded_vec![3, 2] - }) - ); - next_block(true); - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - // we slash them - vec![ - Event::BatchChecked { eras: vec![3, 2] }, - Event::Slashed { stash: exposed, amount: Deposit::get() }, - Event::BatchFinished - ] - ); - }); - } - - #[test] - fn validators_cannot_bail() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // a validator switches role and register... - assert_ok!(Staking::nominate( - RuntimeOrigin::signed(VALIDATOR_PREFIX), - vec![VALIDATOR_PREFIX] - )); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX))); - - // but they indeed are exposed! - assert!(pallet_staking::ErasStakers::::contains_key( - BondingDuration::get() - 1, - VALIDATOR_PREFIX - )); - - // process a block, this validator is exposed and has been slashed. - next_block(true); - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![Event::Slashed { stash: 100, amount: Deposit::get() }, Event::BatchFinished] - ); - }); - } - - #[test] - fn unexposed_validator_can_fast_unstake() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // create a new validator that 100% not exposed. 
- Balances::make_free_balance_be(&42, 100 + Deposit::get()); - assert_ok!(Staking::bond(RuntimeOrigin::signed(42), 42, 10, RewardDestination::Staked)); - assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); - - // let them register: - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42))); - - // 2 block's enough to unstake them. - next_block(true); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(42, Deposit::get())], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - next_block(true); - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 42, result: Ok(()) }, - Event::BatchFinished - ] - ); - }); - } -} - -mod batched { - use super::*; - - #[test] - fn single_block_batched_successful() { - ExtBuilder::default().batch(3).build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); - - assert_eq!(Queue::::count(), 4); - assert_eq!(Head::::get(), None); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![ - (1, Deposit::get()), - (5, Deposit::get()), - (7, Deposit::get()) - ], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - assert_eq!(Queue::::count(), 1); - - // when - next_block(true); - - // then - assert_eq!(Head::::get(), None); - assert_eq!(Queue::::count(), 1); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - 
Event::Unstaked { stash: 5, result: Ok(()) }, - Event::Unstaked { stash: 7, result: Ok(()) }, - Event::BatchFinished - ] - ); - }); - } - - #[test] - fn multi_block_batched_successful() { - ExtBuilder::default().batch(3).build_and_execute(|| { - ErasToCheckPerBlock::::put(2); - CurrentEra::::put(BondingDuration::get()); - - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); - - assert_eq!(Queue::::count(), 4); - assert_eq!(Head::::get(), None); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![ - (1, Deposit::get()), - (5, Deposit::get()), - (7, Deposit::get()) - ], - checked: bounded_vec![3, 2] - }) - ); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![ - (1, Deposit::get()), - (5, Deposit::get()), - (7, Deposit::get()) - ], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when - next_block(true); - - // then - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::BatchChecked { eras: vec![3, 2] }, - Event::BatchChecked { eras: vec![1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::Unstaked { stash: 5, result: Ok(()) }, - Event::Unstaked { stash: 7, result: Ok(()) }, - Event::BatchFinished - ] - ); - }); - } - - #[test] - fn multi_block_batched_some_fail() { - ExtBuilder::default().batch(4).build_and_execute(|| { - ErasToCheckPerBlock::::put(2); - CurrentEra::::put(BondingDuration::get()); - - // register two good ones. 
- assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - create_exposed_nominator(666, 1); - create_exposed_nominator(667, 3); - - // then - assert_eq!(Queue::::count(), 4); - assert_eq!(Head::::get(), None); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![ - (1, Deposit::get()), - (3, Deposit::get()), - (666, Deposit::get()) - ], - checked: bounded_vec![3, 2] - }) - ); - - // when - next_block(true); - - // then - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get()), (3, Deposit::get()),], - checked: bounded_vec![3, 2, 1, 0] - }) - ); - - // when - next_block(true); - - // then - assert_eq!(Head::::get(), None); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::Slashed { stash: 667, amount: 7 }, - Event::BatchChecked { eras: vec![3, 2] }, - Event::Slashed { stash: 666, amount: 7 }, - Event::BatchChecked { eras: vec![1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) }, - Event::Unstaked { stash: 3, result: Ok(()) }, - Event::BatchFinished - ] - ); - }); - } - - #[test] - fn multi_block_batched_all_fail_early_exit() { - ExtBuilder::default().batch(2).build_and_execute(|| { - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - - // register two bad ones. - create_exposed_nominator(666, 3); - create_exposed_nominator(667, 2); - - // then - assert_eq!(Queue::::count(), 2); - assert_eq!(Head::::get(), None); - - // when we progress a block.. - next_block(true); - - // ..and register two good ones. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - - // then one of the bad ones is reaped. 
- assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(667, Deposit::get())], - checked: bounded_vec![3] - }) - ); - - // when we go to next block - next_block(true); - - // then the head is empty, we early terminate the batch. - assert_eq!(Head::::get(), None); - - // upon next block, we will assemble a new head. - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stashes: bounded_vec![(1, Deposit::get()), (3, Deposit::get()),], - checked: bounded_vec![3] - }) - ); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::Slashed { stash: 666, amount: Deposit::get() }, - Event::BatchChecked { eras: vec![3] }, - Event::Slashed { stash: 667, amount: Deposit::get() }, - Event::BatchFinished, - Event::BatchChecked { eras: vec![3] } - ] - ); - }); - } -} diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs deleted file mode 100644 index 34ca6517f3168..0000000000000 --- a/frame/fast-unstake/src/types.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Types used in the Fast Unstake pallet. 
- -use crate::{Config, MaxChecking}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - traits::Currency, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, -}; -use scale_info::TypeInfo; -use sp_staking::EraIndex; -use sp_std::prelude::*; - -pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -/// An unstake request. -#[derive( - Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, -)] -#[scale_info(skip_type_params(T))] -pub struct UnstakeRequest { - /// This list of stashes being processed in this request, and their corresponding deposit. - pub(crate) stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize>, - /// The list of eras for which they have been checked. - pub(crate) checked: BoundedVec>, -} diff --git a/frame/fast-unstake/src/weights.rs b/frame/fast-unstake/src/weights.rs deleted file mode 100644 index 6001250e8c24d..0000000000000 --- a/frame/fast-unstake/src/weights.rs +++ /dev/null @@ -1,212 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for pallet_fast_unstake -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/substrate -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_fast_unstake -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/fast-unstake/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_fast_unstake. -pub trait WeightInfo { - fn on_idle_unstake() -> Weight; - fn on_idle_check(x: u32, ) -> Weight; - fn register_fast_unstake() -> Weight; - fn deregister() -> Weight; - fn control() -> Weight; -} - -/// Weights for pallet_fast_unstake using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Bonded (r:1 w:1) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) - // Storage: System Account (r:1 w:1) - // Storage: Balances Locks (r:1 w:1) - // Storage: Staking Ledger (r:0 w:1) - // Storage: Staking Payee (r:0 w:1) - fn on_idle_unstake() -> Weight { - // Minimum execution time: 82_426 nanoseconds. 
- Weight::from_ref_time(83_422_000 as u64) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake Queue (r:2 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking ErasStakers (r:1344 w:0) - /// The range of component `x` is `[672, 86016]`. - fn on_idle_check(x: u32, ) -> Weight { - // Minimum execution time: 13_932_777 nanoseconds. - Weight::from_ref_time(13_996_029_000 as u64) - // Standard Error: 16_878 - .saturating_add(Weight::from_ref_time(18_113_540 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(345 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:1) - // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Balances Locks (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) - fn register_fast_unstake() -> Weight { - // Minimum execution time: 120_190 nanoseconds. 
- Weight::from_ref_time(121_337_000 as u64) - .saturating_add(T::DbWeight::get().reads(14 as u64)) - .saturating_add(T::DbWeight::get().writes(9 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:0) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: FastUnstake CounterForQueue (r:1 w:1) - fn deregister() -> Weight { - // Minimum execution time: 49_897 nanoseconds. - Weight::from_ref_time(50_080_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) - fn control() -> Weight { - // Minimum execution time: 4_814 nanoseconds. - Weight::from_ref_time(4_997_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Bonded (r:1 w:1) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) - // Storage: System Account (r:1 w:1) - // Storage: Balances Locks (r:1 w:1) - // Storage: Staking Ledger (r:0 w:1) - // Storage: Staking Payee (r:0 w:1) - fn on_idle_unstake() -> Weight { - // Minimum execution time: 82_426 nanoseconds. 
- Weight::from_ref_time(83_422_000 as u64) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking ValidatorCount (r:1 w:0) - // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - // Storage: FastUnstake Head (r:1 w:1) - // Storage: FastUnstake Queue (r:2 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking ErasStakers (r:1344 w:0) - /// The range of component `x` is `[672, 86016]`. - fn on_idle_check(x: u32, ) -> Weight { - // Minimum execution time: 13_932_777 nanoseconds. - Weight::from_ref_time(13_996_029_000 as u64) - // Standard Error: 16_878 - .saturating_add(Weight::from_ref_time(18_113_540 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(345 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:1) - // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Balances Locks (r:1 w:1) - // Storage: FastUnstake CounterForQueue (r:1 w:1) - fn register_fast_unstake() -> Weight { - // Minimum execution time: 120_190 nanoseconds. 
- Weight::from_ref_time(121_337_000 as u64) - .saturating_add(RocksDbWeight::get().reads(14 as u64)) - .saturating_add(RocksDbWeight::get().writes(9 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - // Storage: Staking Ledger (r:1 w:0) - // Storage: FastUnstake Queue (r:1 w:1) - // Storage: FastUnstake Head (r:1 w:0) - // Storage: FastUnstake CounterForQueue (r:1 w:1) - fn deregister() -> Weight { - // Minimum execution time: 49_897 nanoseconds. - Weight::from_ref_time(50_080_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) - fn control() -> Weight { - // Minimum execution time: 4_814 nanoseconds. - Weight::from_ref_time(4_997_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } -} diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml index 8c60c847027a3..d6f61c6d250ba 100644 --- a/frame/gilt/Cargo.toml +++ b/frame/gilt/Cargo.toml @@ -31,7 +31,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs index 92ebf81854f23..3df08372f499b 100644 --- a/frame/gilt/src/benchmarking.rs +++ b/frame/gilt/src/benchmarking.rs @@ -77,7 +77,7 @@ benchmarks! { set_target { let origin = T::AdminOrigin::successful_origin(); - }: _(origin, Default::default()) + }: _(origin, Default::default()) verify {} thaw { @@ -97,7 +97,7 @@ benchmarks! { pursue_target_per_item { // bids taken - let b in 0..T::MaxQueueLen::get(); + let b in 1..T::MaxQueueLen::get(); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(b + 1)); @@ -113,7 +113,7 @@ benchmarks! 
{ pursue_target_per_queue { // total queues hit - let q in 0..T::QueueCount::get(); + let q in 1..T::QueueCount::get(); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(q + 1)); diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 28a0f5fd56e67..59522f9a106f2 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -98,7 +98,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// Overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Currency type that this works on. type Currency: ReservableCurrency; @@ -116,7 +116,7 @@ pub mod pallet { + MaxEncodedLen; /// Origin required for setting the target proportion to be under gilt. - type AdminOrigin: EnsureOrigin; + type AdminOrigin: EnsureOrigin; /// Unbalanced handler to account for funds created (in case of a higher total issuance over /// freezing period). 
@@ -335,7 +335,7 @@ pub mod pallet { if (n % T::IntakePeriod::get()).is_zero() { Self::pursue_target(T::MaxIntakeBids::get()) } else { - Weight::zero() + 0 } } } diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs index e1cdf6507ef58..369b34ba77f44 100644 --- a/frame/gilt/src/mock.rs +++ b/frame/gilt/src/mock.rs @@ -49,8 +49,8 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -75,7 +75,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = frame_support::traits::ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -92,7 +92,7 @@ ord_parameter_types! 
{ } impl pallet_gilt::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type CurrencyBalance = ::Balance; type AdminOrigin = frame_system::EnsureSignedBy; diff --git a/frame/gilt/src/tests.rs b/frame/gilt/src/tests.rs index 2ac369dd3b8b3..486601b5b2f21 100644 --- a/frame/gilt/src/tests.rs +++ b/frame/gilt/src/tests.rs @@ -49,8 +49,8 @@ fn set_target_works() { new_test_ext().execute_with(|| { run_to_block(1); let e = DispatchError::BadOrigin; - assert_noop!(Gilt::set_target(RuntimeOrigin::signed(2), Perquintill::from_percent(50)), e); - assert_ok!(Gilt::set_target(RuntimeOrigin::signed(1), Perquintill::from_percent(50))); + assert_noop!(Gilt::set_target(Origin::signed(2), Perquintill::from_percent(50)), e); + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(50))); assert_eq!( ActiveTotal::::get(), @@ -68,19 +68,13 @@ fn set_target_works() { fn place_bid_works() { new_test_ext().execute_with(|| { run_to_block(1); + assert_noop!(Gilt::place_bid(Origin::signed(1), 1, 2), Error::::AmountTooSmall); assert_noop!( - Gilt::place_bid(RuntimeOrigin::signed(1), 1, 2), - Error::::AmountTooSmall - ); - assert_noop!( - Gilt::place_bid(RuntimeOrigin::signed(1), 101, 2), + Gilt::place_bid(Origin::signed(1), 101, 2), BalancesError::::InsufficientBalance ); - assert_noop!( - Gilt::place_bid(RuntimeOrigin::signed(1), 10, 4), - Error::::DurationTooBig - ); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 4), Error::::DurationTooBig); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); assert_eq!(Balances::reserved_balance(1), 10); assert_eq!(Queues::::get(2), vec![GiltBid { amount: 10, who: 1 }]); assert_eq!(QueueTotals::::get(), vec![(0, 0), (1, 10), (0, 0)]); @@ -91,16 +85,16 @@ fn place_bid_works() { fn place_bid_queuing_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 
20, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 5, 2)); - assert_noop!(Gilt::place_bid(RuntimeOrigin::signed(1), 5, 2), Error::::BidTooLow); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 15, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 20, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 5, 2)); + assert_noop!(Gilt::place_bid(Origin::signed(1), 5, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(Origin::signed(1), 15, 2)); assert_eq!(Balances::reserved_balance(1), 45); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 25, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 25, 2)); assert_eq!(Balances::reserved_balance(1), 60); - assert_noop!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2), Error::::BidTooLow); + assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 2), Error::::BidTooLow); assert_eq!( Queues::::get(2), vec![ @@ -117,11 +111,11 @@ fn place_bid_queuing_works() { fn place_bid_fails_when_queue_full() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(3), 10, 2)); - assert_noop!(Gilt::place_bid(RuntimeOrigin::signed(4), 10, 2), Error::::BidTooLow); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(4), 10, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 10, 2)); + assert_noop!(Gilt::place_bid(Origin::signed(4), 10, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(Origin::signed(4), 10, 3)); }); } @@ -129,11 +123,11 @@ fn place_bid_fails_when_queue_full() { fn multiple_place_bids_works() { new_test_ext().execute_with(|| { run_to_block(1); - 
assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 3)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); assert_eq!(Balances::reserved_balance(1), 40); assert_eq!(Balances::reserved_balance(2), 10); @@ -155,9 +149,9 @@ fn multiple_place_bids_works() { fn retract_single_item_queue_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 1)); assert_eq!(Balances::reserved_balance(1), 10); assert_eq!(Queues::::get(1), vec![]); @@ -170,12 +164,12 @@ fn retract_single_item_queue_works() { fn retract_with_other_and_duplicate_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); - assert_ok!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 2)); + 
assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 2)); assert_eq!(Balances::reserved_balance(1), 20); assert_eq!(Balances::reserved_balance(2), 10); assert_eq!(Queues::::get(1), vec![GiltBid { amount: 10, who: 1 },]); @@ -191,11 +185,11 @@ fn retract_with_other_and_duplicate_works() { fn retract_non_existent_item_fails() { new_test_ext().execute_with(|| { run_to_block(1); - assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 1), Error::::NotFound); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); - assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(1), 20, 1), Error::::NotFound); - assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 2), Error::::NotFound); - assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(2), 10, 1), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 1), Error::::NotFound); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 20, 1), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 2), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(2), 10, 1), Error::::NotFound); }); } @@ -203,8 +197,8 @@ fn retract_non_existent_item_fails() { fn basic_enlarge_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); Gilt::enlarge(40, 2); // Takes 2/2, then stopped because it reaches its max amount @@ -234,10 +228,10 @@ fn basic_enlarge_works() { fn enlarge_respects_bids_limit() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(3), 40, 2)); - 
assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(4), 40, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(4), 40, 3)); Gilt::enlarge(100, 2); // Should have taken 4/3 and 2/2, then stopped because it's only allowed 2. @@ -275,7 +269,7 @@ fn enlarge_respects_bids_limit() { fn enlarge_respects_amount_limit_and_will_split() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 80, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 80, 1)); Gilt::enlarge(40, 2); // Takes 2/2, then stopped because it reaches its max amount @@ -302,14 +296,14 @@ fn enlarge_respects_amount_limit_and_will_split() { fn basic_thaw_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); Gilt::enlarge(40, 1); run_to_block(3); - assert_noop!(Gilt::thaw(RuntimeOrigin::signed(1), 0), Error::::NotExpired); + assert_noop!(Gilt::thaw(Origin::signed(1), 0), Error::::NotExpired); run_to_block(4); - assert_noop!(Gilt::thaw(RuntimeOrigin::signed(1), 1), Error::::Unknown); - assert_noop!(Gilt::thaw(RuntimeOrigin::signed(2), 0), Error::::NotOwner); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); + assert_noop!(Gilt::thaw(Origin::signed(1), 1), Error::::Unknown); + assert_noop!(Gilt::thaw(Origin::signed(2), 0), Error::::NotOwner); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); assert_eq!( ActiveTotal::::get(), @@ -330,7 +324,7 @@ fn basic_thaw_works() { fn thaw_when_issuance_higher_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 100, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); Gilt::enlarge(100, 1); // Everybody else's balances goes up by 50% @@ -339,7 +333,7 @@ fn 
thaw_when_issuance_higher_works() { Balances::make_free_balance_be(&4, 150); run_to_block(4); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 150); assert_eq!(Balances::reserved_balance(1), 0); @@ -353,16 +347,16 @@ fn thaw_with_ignored_issuance_works() { // Give account zero some balance. Balances::make_free_balance_be(&0, 200); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 100, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); Gilt::enlarge(100, 1); // Account zero transfers 50 into everyone else's accounts. - assert_ok!(Balances::transfer(RuntimeOrigin::signed(0), 2, 50)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(0), 3, 50)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(0), 4, 50)); + assert_ok!(Balances::transfer(Origin::signed(0), 2, 50)); + assert_ok!(Balances::transfer(Origin::signed(0), 3, 50)); + assert_ok!(Balances::transfer(Origin::signed(0), 4, 50)); run_to_block(4); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); // Account zero changes have been ignored. 
assert_eq!(Balances::free_balance(1), 150); @@ -374,7 +368,7 @@ fn thaw_with_ignored_issuance_works() { fn thaw_when_issuance_lower_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 100, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); Gilt::enlarge(100, 1); // Everybody else's balances goes down by 25% @@ -383,7 +377,7 @@ fn thaw_when_issuance_lower_works() { Balances::make_free_balance_be(&4, 75); run_to_block(4); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 75); assert_eq!(Balances::reserved_balance(1), 0); @@ -394,9 +388,9 @@ fn thaw_when_issuance_lower_works() { fn multiple_thaws_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 60, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 50, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); Gilt::enlarge(200, 3); // Double everyone's free balances. 
@@ -405,9 +399,9 @@ fn multiple_thaws_works() { Balances::make_free_balance_be(&4, 200); run_to_block(4); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 1)); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(2), 2)); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(1), 1)); + assert_ok!(Gilt::thaw(Origin::signed(2), 2)); assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(2), 200); @@ -418,9 +412,9 @@ fn multiple_thaws_works() { fn multiple_thaws_works_in_alternative_thaw_order() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 60, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 50, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); Gilt::enlarge(200, 3); // Double everyone's free balances. 
@@ -429,9 +423,9 @@ fn multiple_thaws_works_in_alternative_thaw_order() { Balances::make_free_balance_be(&4, 200); run_to_block(4); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(2), 2)); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 1)); - assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(2), 2)); + assert_ok!(Gilt::thaw(Origin::signed(1), 1)); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(2), 200); @@ -442,12 +436,12 @@ fn multiple_thaws_works_in_alternative_thaw_order() { fn enlargement_to_target_works() { new_test_ext().execute_with(|| { run_to_block(2); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 2)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 3)); - assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(3), 40, 3)); - assert_ok!(Gilt::set_target(RuntimeOrigin::signed(1), Perquintill::from_percent(40))); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 3)); + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(40))); run_to_block(3); assert_eq!(Queues::::get(1), vec![GiltBid { amount: 40, who: 1 },]); @@ -546,7 +540,7 @@ fn enlargement_to_target_works() { ); // Set target a bit higher to use up the remaining bid. - assert_ok!(Gilt::set_target(RuntimeOrigin::signed(1), Perquintill::from_percent(60))); + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(60))); run_to_block(10); // Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2. 
diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 82b199b1a6663..952080a2d030b 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_gilt //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/gilt/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/gilt/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,83 +59,71 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { - // Minimum execution time: 42_332 nanoseconds. 
- Weight::from_ref_time(45_584_514 as u64) - // Standard Error: 129 - .saturating_add(Weight::from_ref_time(45_727 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (41_605_000 as Weight) + // Standard Error: 0 + .saturating_add((62_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - // Minimum execution time: 85_866 nanoseconds. - Weight::from_ref_time(87_171_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (97_715_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { - // Minimum execution time: 44_605 nanoseconds. - Weight::from_ref_time(46_850_108 as u64) - // Standard Error: 135 - .saturating_add(Weight::from_ref_time(34_178 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (42_061_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - // Minimum execution time: 7_331 nanoseconds. 
- Weight::from_ref_time(7_619_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (5_026_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - // Minimum execution time: 55_143 nanoseconds. - Weight::from_ref_time(55_845_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (47_753_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - // Minimum execution time: 3_386 nanoseconds. - Weight::from_ref_time(3_461_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (1_663_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) - // Storage: Gilt Active (r:0 w:20) - /// The range of component `b` is `[0, 1000]`. + // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - // Minimum execution time: 34_156 nanoseconds. 
- Weight::from_ref_time(45_262_859 as u64) - // Standard Error: 1_529 - .saturating_add(Weight::from_ref_time(4_181_654 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(b as u64))) + (40_797_000 as Weight) + // Standard Error: 1_000 + .saturating_add((4_122_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - // Storage: Gilt Queues (r:6 w:6) - // Storage: Gilt Active (r:0 w:6) - /// The range of component `q` is `[0, 300]`. + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - // Minimum execution time: 33_526 nanoseconds. 
- Weight::from_ref_time(37_255_562 as u64) - // Standard Error: 3_611 - .saturating_add(Weight::from_ref_time(7_193_128 as u64).saturating_mul(q as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(q as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(q as u64))) + (14_944_000 as Weight) + // Standard Error: 6_000 + .saturating_add((8_135_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) } } @@ -146,82 +131,70 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { - // Minimum execution time: 42_332 nanoseconds. - Weight::from_ref_time(45_584_514 as u64) - // Standard Error: 129 - .saturating_add(Weight::from_ref_time(45_727 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (41_605_000 as Weight) + // Standard Error: 0 + .saturating_add((62_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - // Minimum execution time: 85_866 nanoseconds. 
- Weight::from_ref_time(87_171_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (97_715_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { - // Minimum execution time: 44_605 nanoseconds. - Weight::from_ref_time(46_850_108 as u64) - // Standard Error: 135 - .saturating_add(Weight::from_ref_time(34_178 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (42_061_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - // Minimum execution time: 7_331 nanoseconds. - Weight::from_ref_time(7_619_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (5_026_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - // Minimum execution time: 55_143 nanoseconds. - Weight::from_ref_time(55_845_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (47_753_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - // Minimum execution time: 3_386 nanoseconds. 
- Weight::from_ref_time(3_461_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (1_663_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) - // Storage: Gilt Active (r:0 w:20) - /// The range of component `b` is `[0, 1000]`. + // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - // Minimum execution time: 34_156 nanoseconds. - Weight::from_ref_time(45_262_859 as u64) - // Standard Error: 1_529 - .saturating_add(Weight::from_ref_time(4_181_654 as u64).saturating_mul(b as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(b as u64))) + (40_797_000 as Weight) + // Standard Error: 1_000 + .saturating_add((4_122_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - // Storage: Gilt Queues (r:6 w:6) - // Storage: Gilt Active (r:0 w:6) - /// The range of component `q` is `[0, 300]`. + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - // Minimum execution time: 33_526 nanoseconds. 
- Weight::from_ref_time(37_255_562 as u64) - // Standard Error: 3_611 - .saturating_add(Weight::from_ref_time(7_193_128 as u64).saturating_mul(q as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(q as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(q as u64))) + (14_944_000 as Weight) + // Standard Error: 6_000 + .saturating_add((8_135_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) } } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 4bd17b914cefa..2090a4ea2e228 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -45,7 +45,7 @@ sp-keyring = { version = "6.0.0", path = "../../primitives/keyring" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index 4ca94dd576fb7..330e9bb255177 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -34,14 +34,14 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - (35u64 * WEIGHT_PER_MICROS) - .saturating_add((175u64 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + (35 * WEIGHT_PER_MICROS) + .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(95u64 * WEIGHT_PER_MICROS) + .saturating_add(95 * WEIGHT_PER_MICROS) // report offence - 
.saturating_add(110u64 * WEIGHT_PER_MICROS) - .saturating_add(25u64 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(110 * WEIGHT_PER_MICROS) + .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) // fetching set id -> session index mappings @@ -49,6 +49,6 @@ impl crate::WeightInfo for () { } fn note_stalled() -> Weight { - (3u64 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1)) + (3 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1)) } } diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 181d22fba545c..804272c20480f 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -353,9 +353,9 @@ impl Offence self.time_slot } - fn slash_fraction(&self, offenders_count: u32) -> Perbill { + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index fe5b9861853bf..1781f0a8e40a2 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -40,11 +40,11 @@ use fg_primitives::{ GRANDPA_ENGINE_ID, }; use frame_support::{ - dispatch::{DispatchResultWithPostInfo, Pays}, + dispatch::DispatchResultWithPostInfo, pallet_prelude::Get, storage, traits::{KeyOwnerProofSystem, OneSessionHandler}, - weights::Weight, + weights::{Pays, Weight}, WeakBoundedVec, }; use scale_info::TypeInfo; @@ -87,9 +87,12 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The event type of this module. 
- type RuntimeEvent: From - + Into<::RuntimeEvent> - + IsType<::RuntimeEvent>; + type Event: From + + Into<::Event> + + IsType<::Event>; + + /// The function call. + type Call: From>; /// The proof of key ownership, used for validating equivocation reports /// The proof must include the session index and validator count of the diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs index 81dbd3bab4b67..ab43f7baef4e9 100644 --- a/frame/grandpa/src/migrations/v4.rs +++ b/frame/grandpa/src/migrations/v4.rs @@ -37,7 +37,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::afg", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return Weight::zero() + return 0 } let storage_version = StorageVersion::get::>(); log::info!( @@ -57,7 +57,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { ::BlockWeights::get().max_block } else { - Weight::zero() + 0 } } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 1a97b1345fe5d..5e6c955c441c5 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -71,7 +71,7 @@ impl_opaque_keys! { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -79,16 +79,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -103,10 +103,10 @@ impl frame_system::Config for Test { impl frame_system::offchain::SendTransactionTypes for Test where - RuntimeCall: From, + Call: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type OverarchingCall = Call; + type Extrinsic = TestXt; } parameter_types! { @@ -116,7 +116,7 @@ parameter_types! { /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. 
impl pallet_session::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = u64; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; @@ -145,7 +145,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); @@ -182,16 +182,13 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { type MaxNominations = ConstU32<16>; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type CurrencyBalance = ::Balance; type Slash = (); @@ -206,19 +203,17 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; - type ElectionProvider = onchain::OnChainExecution; + type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } impl pallet_offences::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = 
Staking; } @@ -229,7 +224,8 @@ parameter_types! { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; + type Call = Call; type KeyOwnerProofSystem = Historical; diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 626decd12821e..ab0a9c677b00e 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -25,8 +25,8 @@ use codec::Encode; use fg_primitives::ScheduledChange; use frame_support::{ assert_err, assert_noop, assert_ok, - dispatch::{GetDispatchInfo, Pays}, traits::{Currency, OnFinalize, OneSessionHandler}, + weights::{GetDispatchInfo, Pays}, }; use frame_system::{EventRecord, Phase}; use sp_core::H256; @@ -359,7 +359,7 @@ fn report_equivocation_current_set_works() { // report the equivocation and the tx should be dispatched successfully assert_ok!(Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ),); @@ -437,7 +437,7 @@ fn report_equivocation_old_set_works() { // report the equivocation using the key ownership proof generated on // the old set, the tx should be dispatched successfully assert_ok!(Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ),); @@ -500,7 +500,7 @@ fn report_equivocation_invalid_set_id() { // the call for reporting the equivocation should error assert_err!( Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ), @@ -541,7 +541,7 @@ fn report_equivocation_invalid_session() { // proof from the previous set, the session should be invalid. assert_err!( Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ), @@ -586,7 +586,7 @@ fn report_equivocation_invalid_key_owner_proof() { // proof for a different key than the one in the equivocation proof. 
assert_err!( Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), invalid_key_owner_proof, ), @@ -617,7 +617,7 @@ fn report_equivocation_invalid_equivocation_proof() { let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof.clone(), ), @@ -723,7 +723,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { // we submit the report Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -823,7 +823,7 @@ fn report_equivocation_has_valid_weight() { .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) - .all(|w| w[0].ref_time() < w[1].ref_time())); + .all(|w| w[0] < w[1])); } #[test] @@ -856,12 +856,12 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.any_gt(Weight::zero())); + assert!(info.weight > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. let post_info = Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof.clone()), key_owner_proof.clone(), ) @@ -875,7 +875,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation again which is invalid now since it is // duplicate. 
let post_info = Grandpa::report_equivocation_unsigned( - RuntimeOrigin::none(), + Origin::none(), Box::new(equivocation_proof), key_owner_proof, ) diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 92e55c5c2b934..8e821537fd9b2 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -31,7 +31,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 3584eb954b399..b225db4edfa91 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -23,16 +23,13 @@ use super::*; use crate::Pallet as Identity; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; -use frame_support::{ - ensure, - traits::{EnsureOrigin, Get}, -}; +use frame_support::{ensure, traits::Get}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -40,10 +37,8 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); - let registrar_lookup = T::Lookup::unlookup(registrar.clone()); let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); - let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, registrar_lookup)?; + Identity::::add_registrar(RawOrigin::Root.into(), registrar.clone())?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; let fields = IdentityFields( @@ -76,11 +71,9 @@ fn create_sub_accounts( } // Set identity so 
`set_subs` does not fail. - if IdentityOf::::get(who).is_none() { - let _ = T::Currency::make_free_balance_be(who, BalanceOf::::max_value() / 2u32.into()); - let info = create_identity_info::(1); - Identity::::set_identity(who_origin.into(), Box::new(info))?; - } + let _ = T::Currency::make_free_balance_be(who, BalanceOf::::max_value() / 2u32.into()); + let info = create_identity_info::(1); + Identity::::set_identity(who_origin.into(), Box::new(info))?; Ok(subs) } @@ -121,41 +114,33 @@ benchmarks! { add_registrar { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); - let origin = T::RegistrarOrigin::successful_origin(); - let account = T::Lookup::unlookup(account("registrar", r + 1, SEED)); - }: _(origin, account) + }: _(RawOrigin::Root, account("registrar", r + 1, SEED)) verify { ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); } set_identity { let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get(); + let x in 1 .. 
T::MaxAdditionalFields::get(); let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Add an initial identity let initial_info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?; + Identity::::set_identity(caller_origin.clone(), Box::new(initial_info))?; // User requests judgement from all the registrars, and they approve for i in 0..r { - let registrar: T::AccountId = account("registrar", i, SEED); - let registrar_lookup = T::Lookup::unlookup(registrar.clone()); - let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); - let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); - Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( - RawOrigin::Signed(registrar).into(), + RawOrigin::Signed(account("registrar", i, SEED)).into(), i, caller_lookup.clone(), - Judgement::Reasonable, - T::Hashing::hash_of(&initial_info), + Judgement::Reasonable )?; } caller @@ -171,7 +156,7 @@ benchmarks! { set_subs_new { let caller: T::AccountId = whitelisted_caller(); // Create a new subs vec with s sub accounts - let s in 0 .. T::MaxSubAccounts::get() => (); + let s in 1 .. T::MaxSubAccounts::get() => (); let subs = create_sub_accounts::(&caller, s)?; ensure!(SubsOf::::get(&caller).1.len() == 0, "Caller already has subs"); }: set_subs(RawOrigin::Signed(caller.clone()), subs) @@ -182,7 +167,7 @@ benchmarks! { set_subs_old { let caller: T::AccountId = whitelisted_caller(); // Give them p many previous sub accounts. - let p in 0 .. 
T::MaxSubAccounts::get() => { + let p in 1 .. T::MaxSubAccounts::get() => { let _ = add_sub_accounts::(&caller, p)?; }; // Remove all subs. @@ -198,37 +183,32 @@ benchmarks! { clear_identity { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 0 .. T::MaxSubAccounts::get() => { + let s in 1 .. T::MaxSubAccounts::get() => { // Give them s many sub accounts let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; }; - let x in 0 .. T::MaxAdditionalFields::get(); - - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); - Identity::::set_identity(caller_origin.clone(), Box::new(info.clone()))?; + let x in 1 .. 
T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, Box::new(info))?; + }; // User requests judgement from all the registrars, and they approve for i in 0..r { - let registrar: T::AccountId = account("registrar", i, SEED); - let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); - let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); - Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( - RawOrigin::Signed(registrar).into(), + RawOrigin::Signed(account("registrar", i, SEED)).into(), i, caller_lookup.clone(), - Judgement::Reasonable, - T::Hashing::hash_of(&info), + Judgement::Reasonable )?; } ensure!(IdentityOf::::contains_key(&caller), "Identity does not exist."); @@ -242,11 +222,11 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get() => { + let x in 1 .. T::MaxAdditionalFields::get() => { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, Box::new(info))?; }; }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) @@ -256,15 +236,15 @@ benchmarks! 
{ cancel_request { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get() => { + let x in 1 .. T::MaxAdditionalFields::get() => { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, Box::new(info))?; }; @@ -276,12 +256,10 @@ benchmarks! { set_fee { let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller_lookup)?; + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); }: _(RawOrigin::Signed(caller), r, 100u32.into()) @@ -292,17 +270,14 @@ benchmarks! { set_account_id { let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. 
T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller_lookup)?; + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); - let new_account = T::Lookup::unlookup(account("new", 0, SEED)); - }: _(RawOrigin::Signed(caller), r, new_account) + }: _(RawOrigin::Signed(caller), r, account("new", 0, SEED)) verify { let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed."); @@ -310,13 +285,11 @@ benchmarks! { set_fields { let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller_lookup)?; + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; let fields = IdentityFields( IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter @@ -332,67 +305,58 @@ benchmarks! 
{ provide_judgement { // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::RuntimeOrigin::from(RawOrigin::Signed(user.clone())); + let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get(); - - let info = create_identity_info::(x); - let info_hash = T::Hashing::hash_of(&info); - Identity::::set_identity(user_origin.clone(), Box::new(info))?; + let x in 1 .. T::MaxAdditionalFields::get() => { + let info = create_identity_info::(x); + Identity::::set_identity(user_origin.clone(), Box::new(info))?; + }; - let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller_lookup)?; + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; Identity::::request_judgement(user_origin, r, 10u32.into())?; - }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash) + }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) verify { assert_last_event::(Event::::JudgementGiven { target: user, registrar_index: r }.into()) } kill_identity { let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 0 .. T::MaxSubAccounts::get(); - let x in 0 .. T::MaxAdditionalFields::get(); + let s in 1 .. T::MaxSubAccounts::get(); + let x in 1 .. 
T::MaxAdditionalFields::get(); let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::RuntimeOrigin = RawOrigin::Signed(target.clone()).into(); - let target_lookup = T::Lookup::unlookup(target.clone()); + let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); + let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); let info = create_identity_info::(x); - Identity::::set_identity(target_origin.clone(), Box::new(info.clone()))?; + Identity::::set_identity(target_origin.clone(), Box::new(info))?; let _ = add_sub_accounts::(&target, s)?; // User requests judgement from all the registrars, and they approve for i in 0..r { - let registrar: T::AccountId = account("registrar", i, SEED); - let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); - let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); - Identity::::request_judgement(target_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( - RawOrigin::Signed(registrar).into(), + RawOrigin::Signed(account("registrar", i, SEED)).into(), i, target_lookup.clone(), - Judgement::Reasonable, - T::Hashing::hash_of(&info), + Judgement::Reasonable )?; } ensure!(IdentityOf::::contains_key(&target), "Identity not set"); - let origin = T::ForceOrigin::successful_origin(); - }: _(origin, target_lookup) + }: _(RawOrigin::Root, target_lookup) verify { ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); } add_sub { - let s in 0 .. T::MaxSubAccounts::get() - 1; + let s in 1 .. T::MaxSubAccounts::get() - 1; let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; @@ -428,7 +392,7 @@ benchmarks! { } quit_sub { - let s in 0 .. T::MaxSubAccounts::get() - 1; + let s in 1 .. 
T::MaxSubAccounts::get() - 1; let caller: T::AccountId = whitelisted_caller(); let sup = account("super", 0, SEED); diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 95f5a84d8abb7..46f847606903d 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -79,7 +79,7 @@ mod types; pub mod weights; use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}; -use sp_runtime::traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}; +use sp_runtime::traits::{AppendZerosInput, Saturating, StaticLookup, Zero}; use sp_std::prelude::*; pub use weights::WeightInfo; @@ -94,7 +94,6 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] pub mod pallet { @@ -105,7 +104,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The currency trait. type Currency: ReservableCurrency; @@ -142,10 +141,10 @@ pub mod pallet { type Slashed: OnUnbalanced>; /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The origin which may add or remove registrars. Root can always do this. - type RegistrarOrigin: EnsureOrigin; + type RegistrarOrigin: EnsureOrigin; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -236,10 +235,6 @@ pub mod pallet { NotSub, /// Sub-account isn't owned by sender. NotOwned, - /// The provided judgement was for a different identity. - JudgementForDifferentIdentity, - /// Error that occurs when there is an issue paying for judgement. 
- JudgementPaymentFailed, } #[pallet::event] @@ -287,10 +282,9 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] pub fn add_registrar( origin: OriginFor, - account: AccountIdLookupOf, + account: T::AccountId, ) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; - let account = T::Lookup::lookup(account)?; let (i, registrar_count) = >::try_mutate( |registrars| -> Result<(RegistrarIndex, usize), DispatchError> { @@ -678,10 +672,9 @@ pub mod pallet { pub fn set_account_id( origin: OriginFor, #[pallet::compact] index: RegistrarIndex, - new: AccountIdLookupOf, + new: T::AccountId, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let new = T::Lookup::lookup(new)?; let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) @@ -750,7 +743,6 @@ pub mod pallet { /// - `target`: the account whose identity the judgement is upon. This must be an account /// with a registered identity. /// - `judgement`: the judgement of the registrar of index `reg_index` about `target`. - /// - `identity`: The hash of the [`IdentityInfo`] for that the judgement is provided. /// /// Emits `JudgementGiven` if successful. 
/// @@ -768,9 +760,8 @@ pub mod pallet { pub fn provide_judgement( origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, - target: AccountIdLookupOf, + target: ::Source, judgement: Judgement>, - identity: T::Hash, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; @@ -778,25 +769,20 @@ pub mod pallet { >::get() .get(reg_index as usize) .and_then(Option::as_ref) - .filter(|r| r.account == sender) + .and_then(|r| if r.account == sender { Some(r) } else { None }) .ok_or(Error::::InvalidIndex)?; let mut id = >::get(&target).ok_or(Error::::InvalidTarget)?; - if T::Hashing::hash_of(&id.info) != identity { - return Err(Error::::JudgementForDifferentIdentity.into()) - } - let item = (reg_index, judgement); match id.judgements.binary_search_by_key(®_index, |x| x.0) { Ok(position) => { if let Judgement::FeePaid(fee) = id.judgements[position].1 { - T::Currency::repatriate_reserved( + let _ = T::Currency::repatriate_reserved( &target, &sender, fee, BalanceStatus::Free, - ) - .map_err(|_| Error::::JudgementPaymentFailed)?; + ); } id.judgements[position] = item }, @@ -841,7 +827,7 @@ pub mod pallet { ))] pub fn kill_identity( origin: OriginFor, - target: AccountIdLookupOf, + target: ::Source, ) -> DispatchResultWithPostInfo { T::ForceOrigin::ensure_origin(origin)?; @@ -877,7 +863,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] pub fn add_sub( origin: OriginFor, - sub: AccountIdLookupOf, + sub: ::Source, data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -912,7 +898,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] pub fn rename_sub( origin: OriginFor, - sub: AccountIdLookupOf, + sub: ::Source, data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -931,7 +917,10 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub 
identity of `sub`. #[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))] - pub fn remove_sub(origin: OriginFor, sub: AccountIdLookupOf) -> DispatchResult { + pub fn remove_sub( + origin: OriginFor, + sub: ::Source, + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); let sub = T::Lookup::lookup(sub)?; diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 6b0006971f0e6..6066f176a6106 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -50,22 +50,22 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -81,7 +81,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -103,7 +103,7 @@ ord_parameter_types! 
{ type EnsureOneOrRoot = EitherOfDiverse, EnsureSignedBy>; type EnsureTwoOrRoot = EitherOfDiverse, EnsureSignedBy>; impl pallet_identity::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type Slashed = (); type BasicDeposit = ConstU64<10>; @@ -148,44 +148,41 @@ fn editing_subaccounts_should_work() { new_test_ext().execute_with(|| { let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); - assert_noop!( - Identity::add_sub(RuntimeOrigin::signed(10), 20, data(1)), - Error::::NoIdentity - ); + assert_noop!(Identity::add_sub(Origin::signed(10), 20, data(1)), Error::::NoIdentity); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); // first sub account - assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 1, data(1))); + assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); assert_eq!(SuperOf::::get(1), Some((10, data(1)))); assert_eq!(Balances::free_balance(10), 80); // second sub account - assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 2, data(2))); + assert_ok!(Identity::add_sub(Origin::signed(10), 2, data(2))); assert_eq!(SuperOf::::get(1), Some((10, data(1)))); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(Balances::free_balance(10), 70); // third sub account is too many assert_noop!( - Identity::add_sub(RuntimeOrigin::signed(10), 3, data(3)), + Identity::add_sub(Origin::signed(10), 3, data(3)), Error::::TooManySubAccounts ); // rename first sub account - assert_ok!(Identity::rename_sub(RuntimeOrigin::signed(10), 1, data(11))); + assert_ok!(Identity::rename_sub(Origin::signed(10), 1, data(11))); assert_eq!(SuperOf::::get(1), Some((10, data(11)))); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(Balances::free_balance(10), 70); // remove first sub account - assert_ok!(Identity::remove_sub(RuntimeOrigin::signed(10), 1)); + 
assert_ok!(Identity::remove_sub(Origin::signed(10), 1)); assert_eq!(SuperOf::::get(1), None); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(Balances::free_balance(10), 80); // add third sub account - assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 3, data(3))); + assert_ok!(Identity::add_sub(Origin::signed(10), 3, data(3))); assert_eq!(SuperOf::::get(1), None); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(SuperOf::::get(3), Some((10, data(3)))); @@ -198,27 +195,27 @@ fn resolving_subaccount_ownership_works() { new_test_ext().execute_with(|| { let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(20), Box::new(twenty()))); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(Origin::signed(20), Box::new(twenty()))); // 10 claims 1 as a subaccount - assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 1, data(1))); + assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(10), 80); assert_eq!(Balances::reserved_balance(10), 20); // 20 cannot claim 1 now assert_noop!( - Identity::add_sub(RuntimeOrigin::signed(20), 1, data(1)), + Identity::add_sub(Origin::signed(20), 1, data(1)), Error::::AlreadyClaimed ); // 1 wants to be with 20 so it quits from 10 - assert_ok!(Identity::quit_sub(RuntimeOrigin::signed(1))); + assert_ok!(Identity::quit_sub(Origin::signed(1))); // 1 gets the 10 that 10 paid. 
assert_eq!(Balances::free_balance(1), 20); assert_eq!(Balances::free_balance(10), 80); assert_eq!(Balances::reserved_balance(10), 10); // 20 can claim 1 now - assert_ok!(Identity::add_sub(RuntimeOrigin::signed(20), 1, data(1))); + assert_ok!(Identity::add_sub(Origin::signed(20), 1, data(1))); }); } @@ -235,10 +232,10 @@ fn trailing_zeros_decodes_into_default_data() { #[test] fn adding_registrar_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); - assert_ok!(Identity::set_fields(RuntimeOrigin::signed(3), 0, fields)); + assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); assert_eq!( Identity::registrars(), vec![Some(RegistrarInfo { account: 3, fee: 10, fields })] @@ -250,11 +247,11 @@ fn adding_registrar_should_work() { fn amount_of_registrars_is_limited() { new_test_ext().execute_with(|| { for i in 1..MaxRegistrars::get() + 1 { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), i as u64)); + assert_ok!(Identity::add_registrar(Origin::signed(1), i as u64)); } let last_registrar = MaxRegistrars::get() as u64 + 1; assert_noop!( - Identity::add_registrar(RuntimeOrigin::signed(1), last_registrar), + Identity::add_registrar(Origin::signed(1), last_registrar), Error::::TooManyRegistrars ); }); @@ -263,18 +260,18 @@ fn amount_of_registrars_is_limited() { #[test] fn registration_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); let mut three_fields = ten(); 
three_fields.additional.try_push(Default::default()).unwrap(); three_fields.additional.try_push(Default::default()).unwrap(); - assert!(three_fields.additional.try_push(Default::default()).is_err()); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_eq!(three_fields.additional.try_push(Default::default()), Err(())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_eq!(Identity::identity(10).unwrap().info, ten()); assert_eq!(Balances::free_balance(10), 90); - assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); + assert_ok!(Identity::clear_identity(Origin::signed(10))); assert_eq!(Balances::free_balance(10), 100); - assert_noop!(Identity::clear_identity(RuntimeOrigin::signed(10)), Error::::NotNamed); + assert_noop!(Identity::clear_identity(Origin::signed(10)), Error::::NotNamed); }); } @@ -282,70 +279,27 @@ fn registration_should_work() { fn uninvited_judgement_should_work() { new_test_ext().execute_with(|| { assert_noop!( - Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Reasonable, - H256::random() - ), + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), Error::::InvalidIndex ); - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_noop!( - Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Reasonable, - H256::random() - ), + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), Error::::InvalidTarget ); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_noop!( - Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Reasonable, - H256::random() - ), - Error::::JudgementForDifferentIdentity - ); - - let identity_hash = BlakeTwo256::hash_of(&ten()); - + assert_ok!(Identity::set_identity(Origin::signed(10), 
Box::new(ten()))); assert_noop!( - Identity::provide_judgement( - RuntimeOrigin::signed(10), - 0, - 10, - Judgement::Reasonable, - identity_hash - ), + Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), Error::::InvalidIndex ); assert_noop!( - Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::FeePaid(1), - identity_hash - ), + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::FeePaid(1)), Error::::InvalidJudgement ); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Reasonable, - identity_hash - )); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); assert_eq!(Identity::identity(10).unwrap().judgements, vec![(0, Judgement::Reasonable)]); }); } @@ -353,16 +307,10 @@ fn uninvited_judgement_should_work() { #[test] fn clearing_judgement_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Reasonable, - BlakeTwo256::hash_of(&ten()) - )); - assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + assert_ok!(Identity::clear_identity(Origin::signed(10))); assert_eq!(Identity::identity(10), None); }); } @@ -370,15 +318,12 @@ fn clearing_judgement_should_work() { #[test] fn killing_slashing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_noop!(Identity::kill_identity(RuntimeOrigin::signed(1), 10), BadOrigin); - 
assert_ok!(Identity::kill_identity(RuntimeOrigin::signed(2), 10)); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); + assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); assert_eq!(Identity::identity(10), None); assert_eq!(Balances::free_balance(10), 90); - assert_noop!( - Identity::kill_identity(RuntimeOrigin::signed(2), 10), - Error::::NotNamed - ); + assert_noop!(Identity::kill_identity(Origin::signed(2), 10), Error::::NotNamed); }); } @@ -386,20 +331,17 @@ fn killing_slashing_should_work() { fn setting_subaccounts_should_work() { new_test_ext().execute_with(|| { let mut subs = vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]; - assert_noop!( - Identity::set_subs(RuntimeOrigin::signed(10), subs.clone()), - Error::::NotFound - ); + assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::NotFound); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 80); assert_eq!(Identity::subs_of(10), (10, vec![20].try_into().unwrap())); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); // push another item and re-set it. 
subs.push((30, Data::Raw(vec![50; 1].try_into().unwrap()))); - assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); assert_eq!(Identity::subs_of(10), (20, vec![20, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); @@ -407,7 +349,7 @@ fn setting_subaccounts_should_work() { // switch out one of the items and re-set. subs[0] = (40, Data::Raw(vec![60; 1].try_into().unwrap())); - assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); // no change in the balance assert_eq!(Identity::subs_of(10), (20, vec![40, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), None); @@ -415,7 +357,7 @@ fn setting_subaccounts_should_work() { assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1].try_into().unwrap())))); // clear - assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), vec![])); + assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); assert_eq!(Balances::free_balance(10), 90); assert_eq!(Identity::subs_of(10), (0, BoundedVec::default())); assert_eq!(Identity::super_of(30), None); @@ -423,7 +365,7 @@ fn setting_subaccounts_should_work() { subs.push((20, Data::Raw(vec![40; 1].try_into().unwrap()))); assert_noop!( - Identity::set_subs(RuntimeOrigin::signed(10), subs.clone()), + Identity::set_subs(Origin::signed(10), subs.clone()), Error::::TooManySubAccounts ); }); @@ -432,12 +374,12 @@ fn setting_subaccounts_should_work() { #[test] fn clearing_account_should_remove_subaccounts_and_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_ok!(Identity::set_subs( - 
RuntimeOrigin::signed(10), + Origin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] )); - assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); + assert_ok!(Identity::clear_identity(Origin::signed(10))); assert_eq!(Balances::free_balance(10), 100); assert!(Identity::super_of(20).is_none()); }); @@ -446,12 +388,12 @@ fn clearing_account_should_remove_subaccounts_and_refund() { #[test] fn killing_account_should_remove_subaccounts_and_not_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_ok!(Identity::set_subs( - RuntimeOrigin::signed(10), + Origin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] )); - assert_ok!(Identity::kill_identity(RuntimeOrigin::signed(2), 10)); + assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); assert_eq!(Balances::free_balance(10), 80); assert!(Identity::super_of(20).is_none()); }); @@ -460,30 +402,18 @@ fn killing_account_should_remove_subaccounts_and_not_refund() { #[test] fn cancelling_requested_judgement_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - assert_noop!( - Identity::cancel_request(RuntimeOrigin::signed(10), 0), - Error::::NoIdentity - ); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); - assert_ok!(Identity::cancel_request(RuntimeOrigin::signed(10), 0)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NoIdentity); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + 
assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); assert_eq!(Balances::free_balance(10), 90); - assert_noop!( - Identity::cancel_request(RuntimeOrigin::signed(10), 0), - Error::::NotFound - ); + assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Reasonable, - BlakeTwo256::hash_of(&ten()) - )); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); assert_noop!( - Identity::cancel_request(RuntimeOrigin::signed(10), 0), + Identity::cancel_request(Origin::signed(10), 0), Error::::JudgementGiven ); }); @@ -492,86 +422,49 @@ fn cancelling_requested_judgement_should_work() { #[test] fn requesting_judgement_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_noop!( - Identity::request_judgement(RuntimeOrigin::signed(10), 0, 9), + Identity::request_judgement(Origin::signed(10), 0, 9), Error::::FeeChanged ); - assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); // 10 for the judgement request, 10 for the identity. assert_eq!(Balances::free_balance(10), 80); // Re-requesting won't work as we already paid. 
assert_noop!( - Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10), + Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement ); - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Erroneous, - BlakeTwo256::hash_of(&ten()) - )); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous)); // Registrar got their payment now. assert_eq!(Balances::free_balance(3), 20); // Re-requesting still won't work as it's erroneous. assert_noop!( - Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10), + Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement ); // Requesting from a second registrar still works. - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 4)); - assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 1, 10)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); + assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); // Re-requesting after the judgement has been reduced works. - assert_ok!(Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::OutOfDate, - BlakeTwo256::hash_of(&ten()) - )); - assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); - }); -} - -#[test] -fn provide_judgement_should_return_judgement_payment_failed_error() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); - assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); - // 10 for the judgement request, 10 for the identity. 
- assert_eq!(Balances::free_balance(10), 80); - - // This forces judgement payment failed error - Balances::make_free_balance_be(&3, 0); - assert_noop!( - Identity::provide_judgement( - RuntimeOrigin::signed(3), - 0, - 10, - Judgement::Erroneous, - BlakeTwo256::hash_of(&ten()) - ), - Error::::JudgementPaymentFailed - ); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::OutOfDate)); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); }); } #[test] fn field_deposit_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); - assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); assert_ok!(Identity::set_identity( - RuntimeOrigin::signed(10), + Origin::signed(10), Box::new(IdentityInfo { additional: vec![ ( @@ -595,23 +488,23 @@ fn field_deposit_should_work() { #[test] fn setting_account_id_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); // account 4 cannot change the first registrar's identity since it's owned by 3. assert_noop!( - Identity::set_account_id(RuntimeOrigin::signed(4), 0, 3), + Identity::set_account_id(Origin::signed(4), 0, 3), Error::::InvalidIndex ); // account 3 can, because that's the registrar's current account. - assert_ok!(Identity::set_account_id(RuntimeOrigin::signed(3), 0, 4)); + assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); // account 4 can now, because that's their new ID. 
- assert_ok!(Identity::set_account_id(RuntimeOrigin::signed(4), 0, 3)); + assert_ok!(Identity::set_account_id(Origin::signed(4), 0, 3)); }); } #[test] fn test_has_identity() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert!(Identity::has_identity(&10, IdentityField::Display as u64)); assert!(Identity::has_identity(&10, IdentityField::Legal as u64)); assert!(Identity::has_identity( diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 1f2e8f98e988b..7d3371c31b03b 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,11 @@ //! Autogenerated weights for pallet_identity //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet // --chain=dev @@ -35,7 +34,6 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/identity/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -71,208 +69,192 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - // Minimum execution time: 20_269 nanoseconds. - Weight::from_ref_time(21_910_543 as u64) - // Standard Error: 4_604 - .saturating_add(Weight::from_ref_time(223_104 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (16_649_000 as Weight) + // Standard Error: 5_000 + .saturating_add((241_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 41_872 nanoseconds. 
- Weight::from_ref_time(40_230_216 as u64) - // Standard Error: 2_342 - .saturating_add(Weight::from_ref_time(145_168 as u64).saturating_mul(r as u64)) - // Standard Error: 457 - .saturating_add(Weight::from_ref_time(291_732 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (31_322_000 as Weight) + // Standard Error: 10_000 + .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((312_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:2 w:2) - /// The range of component `s` is `[0, 100]`. + // Storage: Identity SuperOf (r:1 w:1) + /// The range of component `s` is `[1, 100]`. fn set_subs_new(s: u32, ) -> Weight { - // Minimum execution time: 12_024 nanoseconds. 
- Weight::from_ref_time(32_550_819 as u64) - // Standard Error: 5_057 - .saturating_add(Weight::from_ref_time(2_521_245 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (30_012_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_005_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:0 w:2) - /// The range of component `p` is `[0, 100]`. + // Storage: Identity SuperOf (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - // Minimum execution time: 12_232 nanoseconds. 
- Weight::from_ref_time(34_009_761 as u64) - // Standard Error: 5_047 - .saturating_add(Weight::from_ref_time(1_113_100 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) + (29_623_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_100_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `s` is `[1, 100]`. + /// The range of component `x` is `[1, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - // Minimum execution time: 57_144 nanoseconds. 
- Weight::from_ref_time(41_559_247 as u64) - // Standard Error: 9_996 - .saturating_add(Weight::from_ref_time(146_770 as u64).saturating_mul(r as u64)) - // Standard Error: 1_952 - .saturating_add(Weight::from_ref_time(1_086_673 as u64).saturating_mul(s as u64)) - // Standard Error: 1_952 - .saturating_add(Weight::from_ref_time(162_481 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (34_370_000 as Weight) + // Standard Error: 10_000 + .saturating_add((186_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((1_114_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((189_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 44_726 nanoseconds. 
- Weight::from_ref_time(41_637_308 as u64) - // Standard Error: 1_907 - .saturating_add(Weight::from_ref_time(219_078 as u64).saturating_mul(r as u64)) - // Standard Error: 372 - .saturating_add(Weight::from_ref_time(309_888 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (34_759_000 as Weight) + // Standard Error: 4_000 + .saturating_add((251_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((340_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 39_719 nanoseconds. - Weight::from_ref_time(38_008_751 as u64) - // Standard Error: 2_394 - .saturating_add(Weight::from_ref_time(181_870 as u64).saturating_mul(r as u64)) - // Standard Error: 467 - .saturating_add(Weight::from_ref_time(314_990 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (32_254_000 as Weight) + // Standard Error: 7_000 + .saturating_add((159_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((347_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - // Minimum execution time: 10_634 nanoseconds. 
- Weight::from_ref_time(11_383_704 as u64) - // Standard Error: 2_250 - .saturating_add(Weight::from_ref_time(193_094 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (7_858_000 as Weight) + // Standard Error: 3_000 + .saturating_add((190_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - // Minimum execution time: 10_840 nanoseconds. - Weight::from_ref_time(11_638_740 as u64) - // Standard Error: 1_985 - .saturating_add(Weight::from_ref_time(193_016 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (8_011_000 as Weight) + // Standard Error: 3_000 + .saturating_add((187_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - // Minimum execution time: 10_748 nanoseconds. - Weight::from_ref_time(11_346_901 as u64) - // Standard Error: 2_132 - .saturating_add(Weight::from_ref_time(196_630 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (7_970_000 as Weight) + // Standard Error: 3_000 + .saturating_add((175_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
- /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 33_682 nanoseconds. - Weight::from_ref_time(31_336_603 as u64) - // Standard Error: 3_056 - .saturating_add(Weight::from_ref_time(200_403 as u64).saturating_mul(r as u64)) - // Standard Error: 565 - .saturating_add(Weight::from_ref_time(525_142 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (24_730_000 as Weight) + // Standard Error: 4_000 + .saturating_add((196_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((341_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `s` is `[1, 100]`. + /// The range of component `x` is `[1, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - // Minimum execution time: 68_794 nanoseconds. 
- Weight::from_ref_time(52_114_486 as u64) - // Standard Error: 4_808 - .saturating_add(Weight::from_ref_time(153_462 as u64).saturating_mul(r as u64)) - // Standard Error: 939 - .saturating_add(Weight::from_ref_time(1_084_612 as u64).saturating_mul(s as u64)) - // Standard Error: 939 - .saturating_add(Weight::from_ref_time(170_112 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (44_988_000 as Weight) + // Standard Error: 10_000 + .saturating_add((201_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((1_126_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[0, 99]`. + /// The range of component `s` is `[1, 99]`. fn add_sub(s: u32, ) -> Weight { - // Minimum execution time: 37_914 nanoseconds. - Weight::from_ref_time(43_488_083 as u64) - // Standard Error: 1_631 - .saturating_add(Weight::from_ref_time(118_845 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (36_768_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. 
fn rename_sub(s: u32, ) -> Weight { - // Minimum execution time: 16_124 nanoseconds. - Weight::from_ref_time(18_580_462 as u64) - // Standard Error: 688 - .saturating_add(Weight::from_ref_time(67_220 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (13_474_000 as Weight) + // Standard Error: 0 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - // Minimum execution time: 41_517 nanoseconds. - Weight::from_ref_time(45_123_530 as u64) - // Standard Error: 1_530 - .saturating_add(Weight::from_ref_time(105_429 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (37_720_000 as Weight) + // Standard Error: 1_000 + .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[0, 99]`. + /// The range of component `s` is `[1, 99]`. fn quit_sub(s: u32, ) -> Weight { - // Minimum execution time: 30_171 nanoseconds. 
- Weight::from_ref_time(33_355_514 as u64) - // Standard Error: 1_286 - .saturating_add(Weight::from_ref_time(114_716 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (26_848_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -281,207 +263,191 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - // Minimum execution time: 20_269 nanoseconds. - Weight::from_ref_time(21_910_543 as u64) - // Standard Error: 4_604 - .saturating_add(Weight::from_ref_time(223_104 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (16_649_000 as Weight) + // Standard Error: 5_000 + .saturating_add((241_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 41_872 nanoseconds. 
- Weight::from_ref_time(40_230_216 as u64) - // Standard Error: 2_342 - .saturating_add(Weight::from_ref_time(145_168 as u64).saturating_mul(r as u64)) - // Standard Error: 457 - .saturating_add(Weight::from_ref_time(291_732 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (31_322_000 as Weight) + // Standard Error: 10_000 + .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((312_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:2 w:2) - /// The range of component `s` is `[0, 100]`. + // Storage: Identity SuperOf (r:1 w:1) + /// The range of component `s` is `[1, 100]`. fn set_subs_new(s: u32, ) -> Weight { - // Minimum execution time: 12_024 nanoseconds. 
- Weight::from_ref_time(32_550_819 as u64) - // Standard Error: 5_057 - .saturating_add(Weight::from_ref_time(2_521_245 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (30_012_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_005_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:0 w:2) - /// The range of component `p` is `[0, 100]`. + // Storage: Identity SuperOf (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - // Minimum execution time: 12_232 nanoseconds. 
- Weight::from_ref_time(34_009_761 as u64) - // Standard Error: 5_047 - .saturating_add(Weight::from_ref_time(1_113_100 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) + (29_623_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_100_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `s` is `[1, 100]`. + /// The range of component `x` is `[1, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - // Minimum execution time: 57_144 nanoseconds. 
- Weight::from_ref_time(41_559_247 as u64) - // Standard Error: 9_996 - .saturating_add(Weight::from_ref_time(146_770 as u64).saturating_mul(r as u64)) - // Standard Error: 1_952 - .saturating_add(Weight::from_ref_time(1_086_673 as u64).saturating_mul(s as u64)) - // Standard Error: 1_952 - .saturating_add(Weight::from_ref_time(162_481 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (34_370_000 as Weight) + // Standard Error: 10_000 + .saturating_add((186_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((1_114_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((189_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 44_726 nanoseconds. 
- Weight::from_ref_time(41_637_308 as u64) - // Standard Error: 1_907 - .saturating_add(Weight::from_ref_time(219_078 as u64).saturating_mul(r as u64)) - // Standard Error: 372 - .saturating_add(Weight::from_ref_time(309_888 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (34_759_000 as Weight) + // Standard Error: 4_000 + .saturating_add((251_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((340_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 39_719 nanoseconds. - Weight::from_ref_time(38_008_751 as u64) - // Standard Error: 2_394 - .saturating_add(Weight::from_ref_time(181_870 as u64).saturating_mul(r as u64)) - // Standard Error: 467 - .saturating_add(Weight::from_ref_time(314_990 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (32_254_000 as Weight) + // Standard Error: 7_000 + .saturating_add((159_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((347_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - // Minimum execution time: 10_634 nanoseconds. 
- Weight::from_ref_time(11_383_704 as u64) - // Standard Error: 2_250 - .saturating_add(Weight::from_ref_time(193_094 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (7_858_000 as Weight) + // Standard Error: 3_000 + .saturating_add((190_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - // Minimum execution time: 10_840 nanoseconds. - Weight::from_ref_time(11_638_740 as u64) - // Standard Error: 1_985 - .saturating_add(Weight::from_ref_time(193_016 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (8_011_000 as Weight) + // Standard Error: 3_000 + .saturating_add((187_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - // Minimum execution time: 10_748 nanoseconds. - Weight::from_ref_time(11_346_901 as u64) - // Standard Error: 2_132 - .saturating_add(Weight::from_ref_time(196_630 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (7_970_000 as Weight) + // Standard Error: 3_000 + .saturating_add((175_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
- /// The range of component `x` is `[0, 100]`. + /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - // Minimum execution time: 33_682 nanoseconds. - Weight::from_ref_time(31_336_603 as u64) - // Standard Error: 3_056 - .saturating_add(Weight::from_ref_time(200_403 as u64).saturating_mul(r as u64)) - // Standard Error: 565 - .saturating_add(Weight::from_ref_time(525_142 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (24_730_000 as Weight) + // Standard Error: 4_000 + .saturating_add((196_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((341_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[0, 100]`. - /// The range of component `x` is `[0, 100]`. + /// The range of component `s` is `[1, 100]`. + /// The range of component `x` is `[1, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - // Minimum execution time: 68_794 nanoseconds. 
- Weight::from_ref_time(52_114_486 as u64) - // Standard Error: 4_808 - .saturating_add(Weight::from_ref_time(153_462 as u64).saturating_mul(r as u64)) - // Standard Error: 939 - .saturating_add(Weight::from_ref_time(1_084_612 as u64).saturating_mul(s as u64)) - // Standard Error: 939 - .saturating_add(Weight::from_ref_time(170_112 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (44_988_000 as Weight) + // Standard Error: 10_000 + .saturating_add((201_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((1_126_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[0, 99]`. + /// The range of component `s` is `[1, 99]`. fn add_sub(s: u32, ) -> Weight { - // Minimum execution time: 37_914 nanoseconds. 
- Weight::from_ref_time(43_488_083 as u64) - // Standard Error: 1_631 - .saturating_add(Weight::from_ref_time(118_845 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (36_768_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - // Minimum execution time: 16_124 nanoseconds. - Weight::from_ref_time(18_580_462 as u64) - // Standard Error: 688 - .saturating_add(Weight::from_ref_time(67_220 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (13_474_000 as Weight) + // Standard Error: 0 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - // Minimum execution time: 41_517 nanoseconds. 
- Weight::from_ref_time(45_123_530 as u64) - // Standard Error: 1_530 - .saturating_add(Weight::from_ref_time(105_429 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (37_720_000 as Weight) + // Standard Error: 1_000 + .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[0, 99]`. + /// The range of component `s` is `[1, 99]`. fn quit_sub(s: u32, ) -> Weight { - // Minimum execution time: 30_171 nanoseconds. - Weight::from_ref_time(33_355_514 as u64) - // Standard Error: 1_286 - .saturating_add(Weight::from_ref_time(114_716 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (26_848_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 8c08ad1a8a89a..a90b95b21cd88 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -33,7 +33,6 @@ pallet-session = { version = "4.0.0-dev", path = "../session" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 342522ff29b19..f190f6672f309 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -337,7 +337,7 @@ pub mod pallet { type MaxPeerDataEncodingSize: Get; /// The overarching event type. 
- type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// A type for retrieving the validators supposed to be online in a session. type ValidatorSet: ValidatorSetWithIdentification; @@ -958,12 +958,12 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn slash_fraction(&self, offenders: u32) -> Perbill { + fn slash_fraction(offenders: u32, validator_set_count: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% // when 13/30 are offline (around 5% when 1/3 are offline). - if let Some(threshold) = offenders.checked_sub(self.validator_set_count / 10 + 1) { - let x = Perbill::from_rational(3 * threshold, self.validator_set_count); + if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { + let x = Perbill::from_rational(3 * threshold, validator_set_count); x.saturating_mul(Perbill::from_percent(7)) } else { Perbill::default() diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 5782e1a615b8e..2459f7e748941 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -19,6 +19,8 @@ #![cfg(test)] +use std::cell::RefCell; + use frame_support::{ parameter_types, traits::{ConstU32, ConstU64}, @@ -55,18 +57,18 @@ frame_support::construct_runtime!( } ); -parameter_types! { - pub static Validators: Option> = Some(vec![ +thread_local! 
{ + pub static VALIDATORS: RefCell>> = RefCell::new(Some(vec![ 1, 2, 3, - ]); + ])); } pub struct TestSessionManager; impl pallet_session::SessionManager for TestSessionManager { fn new_session(_new_index: SessionIndex) -> Option> { - Validators::mutate(|l| l.take()) + VALIDATORS.with(|l| l.borrow_mut().take()) } fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} @@ -74,8 +76,10 @@ impl pallet_session::SessionManager for TestSessionManager { impl pallet_session::historical::SessionManager for TestSessionManager { fn new_session(_new_index: SessionIndex) -> Option> { - Validators::mutate(|l| { - l.take().map(|validators| validators.iter().map(|v| (*v, *v)).collect()) + VALIDATORS.with(|l| { + l.borrow_mut() + .take() + .map(|validators| validators.iter().map(|v| (*v, *v)).collect()) }) } fn end_session(_: SessionIndex) {} @@ -83,19 +87,19 @@ impl pallet_session::historical::SessionManager for TestSessionManager } /// An extrinsic type used for tests. -pub type Extrinsic = TestXt; +pub type Extrinsic = TestXt; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; -parameter_types! { - pub static Offences: Vec<(Vec, Offence)> = vec![]; +thread_local! { + pub static OFFENCES: RefCell, Offence)>> = RefCell::new(vec![]); } /// A mock offence report handler. 
pub struct OffenceHandler; impl ReportOffence for OffenceHandler { fn report_offence(reporters: Vec, offence: Offence) -> Result<(), OffenceError> { - Offences::mutate(|l| l.push((reporters, offence))); + OFFENCES.with(|l| l.borrow_mut().push((reporters, offence))); Ok(()) } @@ -111,7 +115,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { result.execute_with(|| { for i in 1..=6 { System::inc_providers(&i); - assert_eq!(Session::set_keys(RuntimeOrigin::signed(i), (i - 1).into(), vec![]), Ok(())); + assert_eq!(Session::set_keys(Origin::signed(i), (i - 1).into(), vec![]), Ok(())); } }); result @@ -119,7 +123,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Runtime { @@ -127,16 +131,16 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -162,7 +166,7 @@ impl pallet_session::Config for Runtime { type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = UintAuthorityId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } @@ -179,12 +183,12 @@ impl pallet_authorship::Config for Runtime { type EventHandler = ImOnline; } -parameter_types! { - pub static MockCurrentSessionProgress: Option> = None; +thread_local! 
{ + pub static MOCK_CURRENT_SESSION_PROGRESS: RefCell>> = RefCell::new(None); } -parameter_types! { - pub static MockAverageSessionLength: Option = None; +thread_local! { + pub static MOCK_AVERAGE_SESSION_LENGTH: RefCell> = RefCell::new(None); } pub struct TestNextSessionRotation; @@ -192,7 +196,7 @@ pub struct TestNextSessionRotation; impl frame_support::traits::EstimateNextSessionRotation for TestNextSessionRotation { fn average_session_length() -> u64 { // take the mock result if any and return it - let mock = MockAverageSessionLength::mutate(|p| p.take()); + let mock = MOCK_AVERAGE_SESSION_LENGTH.with(|p| p.borrow_mut().take()); mock.unwrap_or(pallet_session::PeriodicSessions::::average_session_length()) } @@ -204,7 +208,7 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession ); // take the mock result if any and return it - let mock = MockCurrentSessionProgress::mutate(|p| p.take()); + let mock = MOCK_CURRENT_SESSION_PROGRESS.with(|p| p.borrow_mut().take()); (mock.unwrap_or(estimate), weight) } @@ -216,7 +220,7 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession impl Config for Runtime { type AuthorityId = UintAuthorityId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorSet = Historical; type NextSessionRotation = TestNextSessionRotation; type ReportUnresponsiveness = OffenceHandler; @@ -229,9 +233,9 @@ impl Config for Runtime { impl frame_system::offchain::SendTransactionTypes for Runtime where - RuntimeCall: From, + Call: From, { - type OverarchingCall = RuntimeCall; + type OverarchingCall = Call; type Extrinsic = Extrinsic; } diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 366119278d836..288081556a085 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -36,24 +36,22 @@ use sp_runtime::{ #[test] fn test_unresponsiveness_slash_fraction() { - let dummy_offence = - UnresponsivenessOffence { session_index: 0, validator_set_count: 
50, offenders: vec![()] }; // A single case of unresponsiveness is not slashed. - assert_eq!(dummy_offence.slash_fraction(1), Perbill::zero()); + assert_eq!(UnresponsivenessOffence::<()>::slash_fraction(1, 50), Perbill::zero()); assert_eq!( - dummy_offence.slash_fraction(5), + UnresponsivenessOffence::<()>::slash_fraction(5, 50), Perbill::zero(), // 0% ); assert_eq!( - dummy_offence.slash_fraction(7), + UnresponsivenessOffence::<()>::slash_fraction(7, 50), Perbill::from_parts(4200000), // 0.42% ); // One third offline should be punished around 5%. assert_eq!( - dummy_offence.slash_fraction(17), + UnresponsivenessOffence::<()>::slash_fraction(17, 50), Perbill::from_parts(46200000), // 4.62% ); } @@ -68,7 +66,7 @@ fn should_report_offline_validators() { advance_session(); // enact the change and buffer another one let validators = vec![1, 2, 3, 4, 5, 6]; - Validators::mutate(|l| *l = Some(validators.clone())); + VALIDATORS.with(|l| *l.borrow_mut() = Some(validators.clone())); advance_session(); // when @@ -76,7 +74,7 @@ fn should_report_offline_validators() { advance_session(); // then - let offences = Offences::take(); + let offences = OFFENCES.with(|l| l.replace(vec![])); assert_eq!( offences, vec![( @@ -96,7 +94,7 @@ fn should_report_offline_validators() { advance_session(); // then - let offences = Offences::take(); + let offences = OFFENCES.with(|l| l.replace(vec![])); assert_eq!( offences, vec![( @@ -141,7 +139,7 @@ fn heartbeat( "invalid validators len", e @ _ => <&'static str>::from(e), })?; - ImOnline::heartbeat(RuntimeOrigin::none(), heartbeat, signature) + ImOnline::heartbeat(Origin::none(), heartbeat, signature) } #[test] @@ -149,7 +147,7 @@ fn should_mark_online_validator_when_heartbeat_is_received() { new_test_ext().execute_with(|| { advance_session(); // given - Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the 
change and buffer another one advance_session(); @@ -184,7 +182,7 @@ fn late_heartbeat_and_invalid_keys_len_should_fail() { new_test_ext().execute_with(|| { advance_session(); // given - Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -226,7 +224,7 @@ fn should_generate_heartbeats() { // buffer new validators Session::rotate_session(); // enact the change and buffer another one - Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); Session::rotate_session(); // when @@ -240,8 +238,7 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => - heartbeat, + crate::mock::Call::ImOnline(crate::Call::heartbeat { heartbeat, .. 
}) => heartbeat, e => panic!("Unexpected call: {:?}", e), }; @@ -263,7 +260,7 @@ fn should_cleanup_received_heartbeats_on_session_end() { new_test_ext().execute_with(|| { advance_session(); - Validators::mutate(|l| *l = Some(vec![1, 2, 3])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one @@ -294,7 +291,7 @@ fn should_mark_online_validator_when_block_is_authored() { new_test_ext().execute_with(|| { advance_session(); // given - Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -331,7 +328,7 @@ fn should_not_send_a_report_if_already_online() { ext.execute_with(|| { advance_session(); // given - Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -356,8 +353,7 @@ fn should_not_send_a_report_if_already_online() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => - heartbeat, + crate::mock::Call::ImOnline(crate::Call::heartbeat { heartbeat, .. 
}) => heartbeat, e => panic!("Unexpected call: {:?}", e), }; @@ -395,12 +391,12 @@ fn should_handle_missing_progress_estimates() { Session::rotate_session(); // enact the change and buffer another one - Validators::mutate(|l| *l = Some(vec![0, 1, 2])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![0, 1, 2])); Session::rotate_session(); // we will return `None` on the next call to `estimate_current_session_progress` // and the offchain worker should fallback to checking `HeartbeatAfter` - MockCurrentSessionProgress::mutate(|p| *p = Some(None)); + MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); ImOnline::offchain_worker(block); assert_eq!(state.read().transactions.len(), 3); @@ -429,25 +425,26 @@ fn should_handle_non_linear_session_progress() { // mock the session length as being 10 blocks long, // enact the change and buffer another one - Validators::mutate(|l| *l = Some(vec![0, 1, 2])); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![0, 1, 2])); // mock the session length has being 10 which should make us assume the fallback for half // session will be reached by block 5. - MockAverageSessionLength::mutate(|p| *p = Some(10)); + MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(10)); Session::rotate_session(); // if we don't have valid results for the current session progres then // we'll fallback to `HeartbeatAfter` and only heartbeat on block 5. 
- MockCurrentSessionProgress::mutate(|p| *p = Some(None)); + MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); - MockCurrentSessionProgress::mutate(|p| *p = Some(None)); + MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); assert!(ImOnline::send_heartbeats(5).ok().is_some()); // if we have a valid current session progress then we'll heartbeat as soon // as we're past 80% of the session regardless of the block number - MockCurrentSessionProgress::mutate(|p| *p = Some(Some(Permill::from_percent(81)))); + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_percent(81)))); assert!(ImOnline::send_heartbeats(2).ok().is_some()); }); @@ -465,7 +462,8 @@ fn test_does_not_heartbeat_early_in_the_session() { ext.execute_with(|| { // mock current session progress as being 5%. we only randomly start // heartbeating after 10% of the session has elapsed. - MockCurrentSessionProgress::mutate(|p| *p = Some(Some(Permill::from_float(0.05)))); + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(0.05)))); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); }); } @@ -483,8 +481,9 @@ fn test_probability_of_heartbeating_increases_with_session_progress() { let set_test = |progress, random: f64| { // the average session length is 100 blocks, therefore the residual // probability of sending a heartbeat is 1% - MockAverageSessionLength::mutate(|p| *p = Some(100)); - MockCurrentSessionProgress::mutate(|p| *p = Some(Some(Permill::from_float(progress)))); + MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(100)); + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(progress)))); let mut seed = [0u8; 32]; let encoded = ((random * Permill::ACCURACY as f64) as u32).encode(); diff --git a/frame/im-online/src/weights.rs 
b/frame/im-online/src/weights.rs index f81db997c303d..34762e66ec301 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_im_online //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/im-online/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/im-online/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,17 +55,14 @@ impl WeightInfo for SubstrateWeight { // Storage: ImOnline ReceivedHeartbeats (r:1 w:1) // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) - /// The range of component `k` is `[1, 1000]`. - /// The range of component `e` is `[1, 100]`. fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - // Minimum execution time: 101_380 nanoseconds. 
- Weight::from_ref_time(82_735_670 as u64) - // Standard Error: 121 - .saturating_add(Weight::from_ref_time(21_279 as u64).saturating_mul(k as u64)) - // Standard Error: 1_222 - .saturating_add(Weight::from_ref_time(296_343 as u64).saturating_mul(e as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (79_225_000 as Weight) + // Standard Error: 0 + .saturating_add((41_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 0 + .saturating_add((293_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } @@ -79,16 +73,13 @@ impl WeightInfo for () { // Storage: ImOnline ReceivedHeartbeats (r:1 w:1) // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) - /// The range of component `k` is `[1, 1000]`. - /// The range of component `e` is `[1, 100]`. fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - // Minimum execution time: 101_380 nanoseconds. 
- Weight::from_ref_time(82_735_670 as u64) - // Standard Error: 121 - .saturating_add(Weight::from_ref_time(21_279 as u64).saturating_mul(k as u64)) - // Standard Error: 1_222 - .saturating_add(Weight::from_ref_time(296_343 as u64).saturating_mul(e as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (79_225_000 as Weight) + // Standard Error: 0 + .saturating_add((41_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 0 + .saturating_add((293_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index adc3f2a6ea90f..90eb18a106000 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -30,7 +30,6 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index f462f22284d40..cb06cd809f542 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -44,11 +44,10 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index) + }: _(RawOrigin::Signed(caller.clone()), recipient.clone(), account_index) verify { assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); } @@ -71,11 +70,10 @@ benchmarks! 
{ let original: T::AccountId = account("original", 0, SEED); T::Currency::make_free_balance_be(&original, BalanceOf::::max_value()); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(original).into(), account_index)?; - }: _(RawOrigin::Root, recipient_lookup, account_index, false) + }: _(RawOrigin::Root, recipient.clone(), account_index, false) verify { assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); } diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 41893254c3c97..ddc03c94b1233 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -36,7 +36,6 @@ pub use weights::WeightInfo; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; pub use pallet::*; @@ -68,7 +67,7 @@ pub mod pallet { type Deposit: Get>; /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; @@ -134,11 +133,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, - new: AccountIdLookupOf, + new: T::AccountId, index: T::AccountIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; - let new = T::Lookup::lookup(new)?; ensure!(who != new, Error::::NotTransfer); Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { @@ -210,12 +208,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, - new: AccountIdLookupOf, + new: T::AccountId, index: T::AccountIndex, freeze: bool, ) -> DispatchResult { ensure_root(origin)?; - let new = T::Lookup::lookup(new)?; Accounts::::mutate(index, |maybe_value| { if let Some((account, amount, _)) = maybe_value.take() { diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index fd2e9fff16885..6bd79708c3dd2 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -44,7 +44,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -52,8 +52,8 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -61,7 +61,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = Indices; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -80,7 +80,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -90,7 +90,7 @@ impl Config for Test { type AccountIndex = u64; type Currency = Balances; type Deposit = ConstU64<1>; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); } diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs index bed6cfffaa825..73d591c38bb2f 100644 --- a/frame/indices/src/tests.rs +++ b/frame/indices/src/tests.rs @@ -22,7 +22,6 @@ use super::{mock::*, *}; use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; -use sp_runtime::MultiAddress::Id; #[test] fn claiming_should_work() { @@ -61,7 +60,7 @@ fn freezing_should_work() { assert_noop!(Indices::freeze(Some(1).into(), 0), Error::::Permanent); assert_noop!(Indices::free(Some(1).into(), 0), Error::::Permanent); - assert_noop!(Indices::transfer(Some(1).into(), Id(2), 0), Error::::Permanent); + assert_noop!(Indices::transfer(Some(1).into(), 
2, 0), Error::::Permanent); }); } @@ -91,9 +90,9 @@ fn reclaim_index_on_accounts_should_work() { fn transfer_index_on_accounts_should_work() { new_test_ext().execute_with(|| { assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_noop!(Indices::transfer(Some(1).into(), Id(2), 1), Error::::NotAssigned); - assert_noop!(Indices::transfer(Some(2).into(), Id(3), 0), Error::::NotOwner); - assert_ok!(Indices::transfer(Some(1).into(), Id(3), 0)); + assert_noop!(Indices::transfer(Some(1).into(), 2, 1), Error::::NotAssigned); + assert_noop!(Indices::transfer(Some(2).into(), 3, 0), Error::::NotOwner); + assert_ok!(Indices::transfer(Some(1).into(), 3, 0)); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::reserved_balance(3), 1); assert_eq!(Indices::lookup_index(0), Some(3)); @@ -104,7 +103,7 @@ fn transfer_index_on_accounts_should_work() { fn force_transfer_index_on_preowned_should_work() { new_test_ext().execute_with(|| { assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::force_transfer(RuntimeOrigin::root(), Id(3), 0, false)); + assert_ok!(Indices::force_transfer(Origin::root(), 3, 0, false)); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::reserved_balance(3), 0); assert_eq!(Indices::lookup_index(0), Some(3)); @@ -114,7 +113,7 @@ fn force_transfer_index_on_preowned_should_work() { #[test] fn force_transfer_index_on_free_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Indices::force_transfer(RuntimeOrigin::root(), Id(3), 0, false)); + assert_ok!(Indices::force_transfer(Origin::root(), 3, 0, false)); assert_eq!(Balances::reserved_balance(3), 0); assert_eq!(Indices::lookup_index(0), Some(3)); }); diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 7b974875cdf51..6635d45272048 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_indices //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/indices/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/indices/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,40 +56,35 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - // Minimum execution time: 31_756 nanoseconds. - Weight::from_ref_time(32_252_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (25_929_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 38_686 nanoseconds. 
- Weight::from_ref_time(39_402_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (32_627_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - // Minimum execution time: 33_752 nanoseconds. - Weight::from_ref_time(34_348_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (26_804_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - // Minimum execution time: 32_739 nanoseconds. - Weight::from_ref_time(33_151_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (27_390_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - // Minimum execution time: 40_369 nanoseconds. - Weight::from_ref_time(40_982_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (30_973_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } @@ -100,39 +92,34 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - // Minimum execution time: 31_756 nanoseconds. 
- Weight::from_ref_time(32_252_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (25_929_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 38_686 nanoseconds. - Weight::from_ref_time(39_402_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (32_627_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - // Minimum execution time: 33_752 nanoseconds. - Weight::from_ref_time(34_348_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (26_804_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - // Minimum execution time: 32_739 nanoseconds. - Weight::from_ref_time(33_151_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (27_390_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - // Minimum execution time: 40_369 nanoseconds. 
- Weight::from_ref_time(40_982_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (30_973_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index 486bb356059f6..8f7c8eefe800d 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -32,7 +32,6 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs index fba722a07fabd..1c850e66f9c6e 100644 --- a/frame/lottery/src/benchmarking.rs +++ b/frame/lottery/src/benchmarking.rs @@ -78,7 +78,7 @@ benchmarks! { let calls = vec![frame_system::Call::::remark { remark: vec![] }.into(); n as usize]; let origin = T::ManagerOrigin::successful_origin(); assert!(CallIndices::::get().is_empty()); - }: _(origin, calls) + }: _(origin, calls) verify { if !n.is_zero() { assert!(!CallIndices::::get().is_empty()); @@ -90,7 +90,7 @@ benchmarks! { let end = 10u32.into(); let payout = 5u32.into(); let origin = T::ManagerOrigin::successful_origin(); - }: _(origin, price, end, payout, true) + }: _(origin, price, end, payout, true) verify { assert!(crate::Lottery::::get().is_some()); } @@ -99,7 +99,7 @@ benchmarks! 
{ setup_lottery::(true)?; assert_eq!(crate::Lottery::::get().unwrap().repeat, true); let origin = T::ManagerOrigin::successful_origin(); - }: _(origin) + }: _(origin) verify { assert_eq!(crate::Lottery::::get().unwrap().repeat, false); } diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index c501a30ef5f4a..02df65a3336bf 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -96,17 +96,17 @@ pub struct LotteryConfig { } pub trait ValidateCall { - fn validate_call(call: &::RuntimeCall) -> bool; + fn validate_call(call: &::Call) -> bool; } impl ValidateCall for () { - fn validate_call(_: &::RuntimeCall) -> bool { + fn validate_call(_: &::Call) -> bool { false } } impl ValidateCall for Pallet { - fn validate_call(call: &::RuntimeCall) -> bool { + fn validate_call(call: &::Call) -> bool { let valid_calls = CallIndices::::get(); let call_index = match Self::call_to_index(call) { Ok(call_index) => call_index, @@ -134,8 +134,8 @@ pub mod pallet { type PalletId: Get; /// A dispatchable call. - type RuntimeCall: Parameter - + Dispatchable + type Call: Parameter + + Dispatchable + GetDispatchInfo + From>; @@ -146,10 +146,10 @@ pub mod pallet { type Randomness: Randomness; /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The manager origin. - type ManagerOrigin: EnsureOrigin; + type ManagerOrigin: EnsureOrigin; /// The max number of calls available in a single lottery. #[pallet::constant] @@ -300,10 +300,7 @@ pub mod pallet { T::WeightInfo::buy_ticket() .saturating_add(call.get_dispatch_info().weight) )] - pub fn buy_ticket( - origin: OriginFor, - call: Box<::RuntimeCall>, - ) -> DispatchResult { + pub fn buy_ticket(origin: OriginFor, call: Box<::Call>) -> DispatchResult { let caller = ensure_signed(origin.clone())?; call.clone().dispatch(origin).map_err(|e| e.error)?; @@ -318,10 +315,7 @@ pub mod pallet { /// /// This extrinsic must be called by the Manager origin. 
#[pallet::weight(T::WeightInfo::set_calls(calls.len() as u32))] - pub fn set_calls( - origin: OriginFor, - calls: Vec<::RuntimeCall>, - ) -> DispatchResult { + pub fn set_calls(origin: OriginFor, calls: Vec<::Call>) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; ensure!(calls.len() <= T::MaxCalls::get() as usize, Error::::TooManyCalls); if calls.is_empty() { @@ -410,7 +404,7 @@ impl Pallet { /// Converts a vector of calls into a vector of call indices. fn calls_to_indices( - calls: &[::RuntimeCall], + calls: &[::Call], ) -> Result, DispatchError> { let mut indices = BoundedVec::::with_bounded_capacity(calls.len()); for c in calls.iter() { @@ -421,7 +415,7 @@ impl Pallet { } /// Convert a call to it's call index by encoding the call and taking the first two bytes. - fn call_to_index(call: &::RuntimeCall) -> Result { + fn call_to_index(call: &::Call) -> Result { let encoded_call = call.encode(); if encoded_call.len() < 2 { return Err(Error::::EncodingFailed.into()) @@ -430,7 +424,7 @@ impl Pallet { } /// Logic for buying a ticket. 
- fn do_buy_ticket(caller: &T::AccountId, call: &::RuntimeCall) -> DispatchResult { + fn do_buy_ticket(caller: &T::AccountId, call: &::Call) -> DispatchResult { // Check the call is valid lottery let config = Lottery::::get().ok_or(Error::::NotConfigured)?; let block_number = frame_system::Pallet::::block_number(); diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index 1977da5959d39..592551fb6b93f 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -57,16 +57,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -84,7 +84,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -97,10 +97,10 @@ parameter_types! 
{ impl Config for Test { type PalletId = LotteryPalletId; - type RuntimeCall = RuntimeCall; + type Call = Call; type Currency = Balances; type Randomness = TestRandomness; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ManagerOrigin = EnsureRoot; type MaxCalls = ConstU32<2>; type ValidateCall = Lottery; diff --git a/frame/lottery/src/tests.rs b/frame/lottery/src/tests.rs index 0eaf080564008..d8dd6e4b7fe6c 100644 --- a/frame/lottery/src/tests.rs +++ b/frame/lottery/src/tests.rs @@ -20,8 +20,7 @@ use super::*; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; use mock::{ - new_test_ext, run_to_block, Balances, BalancesCall, Lottery, RuntimeCall, RuntimeOrigin, - SystemCall, Test, + new_test_ext, run_to_block, Balances, BalancesCall, Call, Lottery, Origin, SystemCall, Test, }; use pallet_balances::Error as BalancesError; use sp_runtime::traits::BadOrigin; @@ -44,20 +43,20 @@ fn basic_end_to_end_works() { let length = 20; let delay = 5; let calls = vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; // Set calls for the lottery - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); + assert_ok!(Lottery::set_calls(Origin::root(), calls)); // Start lottery, it repeats - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, true)); + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, true)); assert!(crate::Lottery::::get().is_some()); assert_eq!(Balances::free_balance(&1), 100); - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + 
assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); // 20 from the transfer, 10 from buying a ticket assert_eq!(Balances::free_balance(&1), 100 - 20 - 10); assert_eq!(Participants::::get(&1).1.len(), 1); @@ -66,14 +65,14 @@ fn basic_end_to_end_works() { assert_eq!(Tickets::::get(0), Some(1)); // More ticket purchases - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone())); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(3), call.clone())); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(4), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(3), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(4), call.clone())); assert_eq!(TicketsCount::::get(), 4); // Go to end run_to_block(20); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(5), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(5), call.clone())); // Ticket isn't bought assert_eq!(TicketsCount::::get(), 4); @@ -100,15 +99,15 @@ fn stop_repeat_works() { let delay = 5; // Set no calls for the lottery. - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), vec![])); + assert_ok!(Lottery::set_calls(Origin::root(), vec![])); // Start lottery, it repeats. - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, true)); + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, true)); // Non-manager fails to `stop_repeat`. - assert_noop!(Lottery::stop_repeat(RuntimeOrigin::signed(1)), DispatchError::BadOrigin); + assert_noop!(Lottery::stop_repeat(Origin::signed(1)), DispatchError::BadOrigin); // Manager can `stop_repeat`, even twice. - assert_ok!(Lottery::stop_repeat(RuntimeOrigin::root())); - assert_ok!(Lottery::stop_repeat(RuntimeOrigin::root())); + assert_ok!(Lottery::stop_repeat(Origin::root())); + assert_ok!(Lottery::stop_repeat(Origin::root())); // Lottery still exists. 
assert!(crate::Lottery::::get().is_some()); @@ -128,26 +127,26 @@ fn set_calls_works() { assert!(!CallIndices::::exists()); let calls = vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); + assert_ok!(Lottery::set_calls(Origin::root(), calls)); assert!(CallIndices::::exists()); let too_many_calls = vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), - RuntimeCall::System(SystemCall::remark { remark: vec![] }), + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::System(SystemCall::remark { remark: vec![] }), ]; assert_noop!( - Lottery::set_calls(RuntimeOrigin::root(), too_many_calls), + Lottery::set_calls(Origin::root(), too_many_calls), Error::::TooManyCalls, ); // Clear calls - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), vec![])); + assert_ok!(Lottery::set_calls(Origin::root(), vec![])); assert!(CallIndices::::get().is_empty()); }); } @@ -156,8 +155,8 @@ fn set_calls_works() { fn call_to_indices_works() { new_test_ext().execute_with(|| { let calls = vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; let indices = Lottery::calls_to_indices(&calls).unwrap().into_inner(); // Only comparing the length since it is otherwise dependant on the API @@ -165,9 +164,9 @@ fn 
call_to_indices_works() { assert_eq!(indices.len(), calls.len()); let too_many_calls = vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), - RuntimeCall::System(SystemCall::remark { remark: vec![] }), + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::System(SystemCall::remark { remark: vec![] }), ]; assert_noop!(Lottery::calls_to_indices(&too_many_calls), Error::::TooManyCalls); }); @@ -182,16 +181,16 @@ fn start_lottery_works() { // Setup ignores bad origin assert_noop!( - Lottery::start_lottery(RuntimeOrigin::signed(1), price, length, delay, false), + Lottery::start_lottery(Origin::signed(1), price, length, delay, false), BadOrigin, ); // All good - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); // Can't open another one if lottery is already present assert_noop!( - Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, false), + Lottery::start_lottery(Origin::root(), price, length, delay, false), Error::::InProgress, ); }); @@ -203,49 +202,45 @@ fn buy_ticket_works_as_simple_passthrough() { // as a simple passthrough to the real call. new_test_ext().execute_with(|| { // No lottery set up - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 20 })); // This is just a basic transfer then - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(Balances::free_balance(&1), 100 - 20); assert_eq!(TicketsCount::::get(), 0); // Lottery is set up, but too expensive to enter, so `do_buy_ticket` fails. 
let calls = vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); + assert_ok!(Lottery::set_calls(Origin::root(), calls)); // Ticket price of 60 would kill the user's account - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 60, 10, 5, false)); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); + assert_ok!(Lottery::start_lottery(Origin::root(), 60, 10, 5, false)); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(Balances::free_balance(&1), 100 - 20 - 20); assert_eq!(TicketsCount::::get(), 0); // If call would fail, the whole thing still fails the same - let fail_call = - Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); + let fail_call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); assert_noop!( - Lottery::buy_ticket(RuntimeOrigin::signed(1), fail_call), + Lottery::buy_ticket(Origin::signed(1), fail_call), BalancesError::::InsufficientBalance, ); - let bad_origin_call = Box::new(RuntimeCall::Balances(BalancesCall::force_transfer { - source: 0, - dest: 0, - value: 0, - })); - assert_noop!(Lottery::buy_ticket(RuntimeOrigin::signed(1), bad_origin_call), BadOrigin,); + let bad_origin_call = + Box::new(Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 })); + assert_noop!(Lottery::buy_ticket(Origin::signed(1), bad_origin_call), BadOrigin,); // User can call other txs, but doesn't get a ticket let remark_call = - Box::new(RuntimeCall::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), remark_call)); + 
Box::new(Call::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), remark_call)); assert_eq!(TicketsCount::::get(), 0); let successful_call = - Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), successful_call)); + Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), successful_call)); assert_eq!(TicketsCount::::get(), 1); }); } @@ -255,43 +250,42 @@ fn buy_ticket_works() { new_test_ext().execute_with(|| { // Set calls for the lottery. let calls = vec![ - RuntimeCall::System(SystemCall::remark { remark: vec![] }), - RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::System(SystemCall::remark { remark: vec![] }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); + assert_ok!(Lottery::set_calls(Origin::root(), calls)); // Can't buy ticket before start - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 0); // Start lottery - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 20, 5, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), 1, 20, 5, false)); // Go to start, buy ticket for transfer run_to_block(5); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); assert_eq!(TicketsCount::::get(), 1); // Can't buy another of the same ticket (even if call is slightly changed) - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 3, value: 
30 })); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 3, value: 30 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); assert_eq!(TicketsCount::::get(), 1); // Buy ticket for remark - let call = - Box::new(RuntimeCall::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); + let call = Box::new(Call::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 2); // Go to end, can't buy tickets anymore run_to_block(20); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); assert_eq!(TicketsCount::::get(), 2); // Go to payout, can't buy tickets when there is no lottery open run_to_block(25); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); assert_eq!(TicketsCount::::get(), 0); assert_eq!(LotteryIndex::::get(), 1); }); @@ -302,9 +296,9 @@ fn buy_ticket_works() { #[test] fn do_buy_ticket_already_participating() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 10, 10, false)); + let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); + assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 10, false)); // Buying once works. 
assert_ok!(Lottery::do_buy_ticket(&1, &calls[0])); @@ -317,16 +311,16 @@ fn do_buy_ticket_already_participating() { #[test] fn buy_ticket_already_participating() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 10, 10, false)); + let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); + assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 10, false)); // Buying once works. let call = Box::new(calls[0].clone()); - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); // Buying the same ticket again returns Ok, but changes nothing. - assert_storage_noop!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call).unwrap()); + assert_storage_noop!(Lottery::buy_ticket(Origin::signed(1), call).unwrap()); // Exactly one ticket exists. assert_eq!(TicketsCount::::get(), 1); @@ -337,14 +331,14 @@ fn buy_ticket_already_participating() { #[test] fn buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); + let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); // Price set to 100. - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 100, 10, 10, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), 100, 10, 10, false)); let call = Box::new(calls[0].clone()); // Buying a ticket returns Ok, but changes nothing. 
- assert_storage_noop!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call).unwrap()); + assert_storage_noop!(Lottery::buy_ticket(Origin::signed(1), call).unwrap()); assert!(TicketsCount::::get().is_zero()); }); } @@ -352,10 +346,10 @@ fn buy_ticket_insufficient_balance() { #[test] fn do_buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); + let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); // Price set to 101. - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 101, 10, 10, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), 101, 10, 10, false)); // Buying fails with InsufficientBalance. assert_noop!( @@ -369,10 +363,10 @@ fn do_buy_ticket_insufficient_balance() { #[test] fn do_buy_ticket_keep_alive() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); + let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); // Price set to 100. - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 100, 10, 10, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), 100, 10, 10, false)); // Buying fails with KeepAlive. assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), BalancesError::::KeepAlive); @@ -388,9 +382,9 @@ fn no_participants_works() { let delay = 5; // Set no calls for the lottery. - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), vec![])); + assert_ok!(Lottery::set_calls(Origin::root(), vec![])); // Start lottery. 
- assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, length, delay, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), 10, length, delay, false)); // End the lottery, no one wins. run_to_block(length + delay); @@ -405,7 +399,7 @@ fn start_lottery_will_create_account() { let delay = 5; assert_eq!(Balances::total_balance(&Lottery::account_id()), 0); - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, false)); + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); assert_eq!(Balances::total_balance(&Lottery::account_id()), 1); }); } @@ -421,13 +415,13 @@ fn choose_ticket_trivial_cases() { #[test] fn choose_account_one_participant() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); - assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, 10, 10, false)); + let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); + assert_ok!(Lottery::start_lottery(Origin::root(), 10, 10, 10, false)); let call = Box::new(calls[0].clone()); // Buy one ticket with account 1. - assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); // Account 1 is always the winner. assert_eq!(Lottery::choose_account().unwrap(), 1); }); diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index 4ced6a642781a..193958cfd41aa 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_lottery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/lottery/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/lottery/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,35 +63,30 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - // Minimum execution time: 52_451 nanoseconds. - Weight::from_ref_time(52_843_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (44_706_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Lottery CallIndices (r:0 w:1) - /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { - // Minimum execution time: 14_389 nanoseconds. 
- Weight::from_ref_time(15_700_569 as u64) - // Standard Error: 4_677 - .saturating_add(Weight::from_ref_time(296_850 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (12_556_000 as Weight) + // Standard Error: 7_000 + .saturating_add((295_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - // Minimum execution time: 44_814 nanoseconds. - Weight::from_ref_time(45_611_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (38_051_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - // Minimum execution time: 10_384 nanoseconds. - Weight::from_ref_time(10_727_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (6_910_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -102,10 +94,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - // Minimum execution time: 62_720 nanoseconds. 
- Weight::from_ref_time(63_545_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (53_732_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -114,10 +105,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - // Minimum execution time: 63_452 nanoseconds. - Weight::from_ref_time(65_010_000 as u64) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (55_868_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } } @@ -131,35 +121,30 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - // Minimum execution time: 52_451 nanoseconds. - Weight::from_ref_time(52_843_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (44_706_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Lottery CallIndices (r:0 w:1) - /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { - // Minimum execution time: 14_389 nanoseconds. 
- Weight::from_ref_time(15_700_569 as u64) - // Standard Error: 4_677 - .saturating_add(Weight::from_ref_time(296_850 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (12_556_000 as Weight) + // Standard Error: 7_000 + .saturating_add((295_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - // Minimum execution time: 44_814 nanoseconds. - Weight::from_ref_time(45_611_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (38_051_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - // Minimum execution time: 10_384 nanoseconds. - Weight::from_ref_time(10_727_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (6_910_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -167,10 +152,9 @@ impl WeightInfo for () { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - // Minimum execution time: 62_720 nanoseconds. 
- Weight::from_ref_time(63_545_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (53_732_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -179,9 +163,8 @@ impl WeightInfo for () { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - // Minimum execution time: 63_452 nanoseconds. - Weight::from_ref_time(65_010_000 as u64) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (55_868_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } } diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 8ec1087e5ac0e..0473fd46956af 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/membership/README.md b/frame/membership/README.md index 3499a3f864e48..a769be497050e 100644 --- a/frame/membership/README.md +++ b/frame/membership/README.md @@ -1,6 +1,6 @@ # Membership Module -Allows control of membership of a set of `AccountId`s, useful for managing membership of a +Allows control of membership of a set of `AccountId`s, useful for managing membership of of a collective. A prime member may be set. 
-License: Apache-2.0 +License: Apache-2.0 \ No newline at end of file diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 4191bbcc5d86e..24ecfd5333c66 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -17,7 +17,7 @@ //! # Membership Module //! -//! Allows control of membership of a set of `AccountId`s, useful for managing membership of a +//! Allows control of membership of a set of `AccountId`s, useful for managing membership of of a //! collective. A prime member may be set // Ensure we're `no_std` when compiling for Wasm. @@ -27,7 +27,6 @@ use frame_support::{ traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers}, BoundedVec, }; -use sp_runtime::traits::StaticLookup; use sp_std::prelude::*; pub mod migrations; @@ -36,8 +35,6 @@ pub mod weights; pub use pallet::*; pub use weights::WeightInfo; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -55,23 +52,22 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Required origin for adding a member (though can always be Root). - type AddOrigin: EnsureOrigin; + type AddOrigin: EnsureOrigin; /// Required origin for removing a member (though can always be Root). - type RemoveOrigin: EnsureOrigin; + type RemoveOrigin: EnsureOrigin; /// Required origin for adding and removing a member in a single action. - type SwapOrigin: EnsureOrigin; + type SwapOrigin: EnsureOrigin; /// Required origin for resetting membership. - type ResetOrigin: EnsureOrigin; + type ResetOrigin: EnsureOrigin; /// Required origin for setting or resetting the prime member. - type PrimeOrigin: EnsureOrigin; + type PrimeOrigin: EnsureOrigin; /// The receiver of the signal for when the membership has been initialized. 
This happens /// pre-genesis and will usually be the same as `MembershipChanged`. If you need to do @@ -148,7 +144,7 @@ pub mod pallet { /// One of the members' keys changed. KeyChanged, /// Phantom member, never used. - Dummy { _phantom_data: PhantomData<(T::AccountId, >::RuntimeEvent)> }, + Dummy { _phantom_data: PhantomData<(T::AccountId, >::Event)> }, } #[pallet::error] @@ -167,9 +163,8 @@ pub mod pallet { /// /// May only be called from `T::AddOrigin`. #[pallet::weight(50_000_000)] - pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; let mut members = >::get(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; @@ -189,9 +184,8 @@ pub mod pallet { /// /// May only be called from `T::RemoveOrigin`. #[pallet::weight(50_000_000)] - pub fn remove_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn remove_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; let mut members = >::get(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; @@ -214,12 +208,10 @@ pub mod pallet { #[pallet::weight(50_000_000)] pub fn swap_member( origin: OriginFor, - remove: AccountIdLookupOf, - add: AccountIdLookupOf, + remove: T::AccountId, + add: T::AccountId, ) -> DispatchResult { T::SwapOrigin::ensure_origin(origin)?; - let remove = T::Lookup::lookup(remove)?; - let add = T::Lookup::lookup(add)?; if remove == add { return Ok(()) @@ -267,9 +259,8 @@ pub mod pallet { /// /// Prime membership is passed from the origin account to `new`, if extant. 
#[pallet::weight(50_000_000)] - pub fn change_key(origin: OriginFor, new: AccountIdLookupOf) -> DispatchResult { + pub fn change_key(origin: OriginFor, new: T::AccountId) -> DispatchResult { let remove = ensure_signed(origin)?; - let new = T::Lookup::lookup(new)?; if remove != new { let mut members = >::get(); @@ -301,9 +292,8 @@ pub mod pallet { /// /// May only be called from `T::PrimeOrigin`. #[pallet::weight(50_000_000)] - pub fn set_prime(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn set_prime(origin: OriginFor, who: T::AccountId) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); @@ -365,8 +355,7 @@ mod benchmark { assert_ok!(>::reset_members(reset_origin, members.clone())); if let Some(prime) = prime.map(|i| members[i].clone()) { - let prime_lookup = T::Lookup::unlookup(prime); - assert_ok!(>::set_prime(prime_origin, prime_lookup)); + assert_ok!(>::set_prime(prime_origin, prime)); } else { assert_ok!(>::clear_prime(prime_origin)); } @@ -379,9 +368,8 @@ mod benchmark { let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); set_members::(members, None); let new_member = account::("add", m, SEED); - let new_member_lookup = T::Lookup::unlookup(new_member.clone()); }: { - assert_ok!(>::add_member(T::AddOrigin::successful_origin(), new_member_lookup)); + assert_ok!(>::add_member(T::AddOrigin::successful_origin(), new_member.clone())); } verify { assert!(>::get().contains(&new_member)); @@ -397,9 +385,8 @@ mod benchmark { set_members::(members.clone(), Some(members.len() - 1)); let to_remove = members.first().cloned().unwrap(); - let to_remove_lookup = T::Lookup::unlookup(to_remove.clone()); }: { - assert_ok!(>::remove_member(T::RemoveOrigin::successful_origin(), to_remove_lookup)); + 
assert_ok!(>::remove_member(T::RemoveOrigin::successful_origin(), to_remove.clone())); } verify { assert!(!>::get().contains(&to_remove)); // prime is rejigged @@ -414,14 +401,12 @@ mod benchmark { let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); set_members::(members.clone(), Some(members.len() - 1)); let add = account::("member", m, SEED); - let add_lookup = T::Lookup::unlookup(add.clone()); let remove = members.first().cloned().unwrap(); - let remove_lookup = T::Lookup::unlookup(remove.clone()); }: { assert_ok!(>::swap_member( T::SwapOrigin::successful_origin(), - remove_lookup, - add_lookup, + remove.clone(), + add.clone(), )); } verify { assert!(!>::get().contains(&remove)); @@ -457,10 +442,9 @@ mod benchmark { set_members::(members.clone(), Some(members.len() - 1)); let add = account::("member", m, SEED); - let add_lookup = T::Lookup::unlookup(add.clone()); whitelist!(prime); }: { - assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup)); + assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add.clone())); } verify { assert!(!>::get().contains(&prime)); assert!(>::get().contains(&add)); @@ -473,10 +457,9 @@ mod benchmark { let m in 1 .. T::MaxMembers::get(); let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); let prime = members.last().cloned().unwrap(); - let prime_lookup = T::Lookup::unlookup(prime.clone()); set_members::(members, None); }: { - assert_ok!(>::set_prime(T::PrimeOrigin::successful_origin(), prime_lookup)); + assert_ok!(>::set_prime(T::PrimeOrigin::successful_origin(), prime)); } verify { assert!(>::get().is_some()); assert!(::get_prime().is_some()); @@ -533,7 +516,7 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); pub static Members: Vec = vec![]; pub static Prime: Option = None; } @@ -543,16 +526,16 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -601,7 +584,7 @@ mod tests { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type AddOrigin = EnsureSignedBy; type RemoveOrigin = EnsureSignedBy; type SwapOrigin = EnsureSignedBy; @@ -647,16 +630,13 @@ mod tests { #[test] fn prime_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::set_prime(RuntimeOrigin::signed(4), 20), BadOrigin); - assert_noop!( - Membership::set_prime(RuntimeOrigin::signed(5), 15), - Error::::NotMember - ); - assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); + assert_noop!(Membership::set_prime(Origin::signed(4), 20), BadOrigin); + assert_noop!(Membership::set_prime(Origin::signed(5), 15), Error::::NotMember); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_eq!(Membership::prime(), Some(20)); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - assert_ok!(Membership::clear_prime(RuntimeOrigin::signed(5))); + assert_ok!(Membership::clear_prime(Origin::signed(5))); assert_eq!(Membership::prime(), None); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); @@ -665,12 +645,12 @@ mod tests { #[test] fn add_member_works() { 
new_test_ext().execute_with(|| { - assert_noop!(Membership::add_member(RuntimeOrigin::signed(5), 15), BadOrigin); + assert_noop!(Membership::add_member(Origin::signed(5), 15), BadOrigin); assert_noop!( - Membership::add_member(RuntimeOrigin::signed(1), 10), + Membership::add_member(Origin::signed(1), 10), Error::::AlreadyMember ); - assert_ok!(Membership::add_member(RuntimeOrigin::signed(1), 15)); + assert_ok!(Membership::add_member(Origin::signed(1), 15)); assert_eq!(Membership::members(), vec![10, 15, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); }); @@ -679,13 +659,13 @@ mod tests { #[test] fn remove_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::remove_member(RuntimeOrigin::signed(5), 20), BadOrigin); + assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); assert_noop!( - Membership::remove_member(RuntimeOrigin::signed(2), 15), + Membership::remove_member(Origin::signed(2), 15), Error::::NotMember ); - assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); - assert_ok!(Membership::remove_member(RuntimeOrigin::signed(2), 20)); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_ok!(Membership::remove_member(Origin::signed(2), 20)); assert_eq!(Membership::members(), vec![10, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), None); @@ -696,24 +676,24 @@ mod tests { #[test] fn swap_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::swap_member(RuntimeOrigin::signed(5), 10, 25), BadOrigin); + assert_noop!(Membership::swap_member(Origin::signed(5), 10, 25), BadOrigin); assert_noop!( - Membership::swap_member(RuntimeOrigin::signed(3), 15, 25), + Membership::swap_member(Origin::signed(3), 15, 25), Error::::NotMember ); assert_noop!( - Membership::swap_member(RuntimeOrigin::signed(3), 10, 30), + Membership::swap_member(Origin::signed(3), 10, 30), 
Error::::AlreadyMember ); - assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); - assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 20, 20)); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); assert_eq!(Membership::members(), vec![10, 20, 30]); assert_eq!(Membership::prime(), Some(20)); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 10)); - assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 25)); + assert_ok!(Membership::set_prime(Origin::signed(5), 10)); + assert_ok!(Membership::swap_member(Origin::signed(3), 10, 25)); assert_eq!(Membership::members(), vec![20, 25, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), None); @@ -724,7 +704,7 @@ mod tests { #[test] fn swap_member_works_that_does_not_change_order() { new_test_ext().execute_with(|| { - assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 5)); + assert_ok!(Membership::swap_member(Origin::signed(3), 10, 5)); assert_eq!(Membership::members(), vec![5, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); }); @@ -733,16 +713,16 @@ mod tests { #[test] fn change_key_works() { new_test_ext().execute_with(|| { - assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 10)); + assert_ok!(Membership::set_prime(Origin::signed(5), 10)); assert_noop!( - Membership::change_key(RuntimeOrigin::signed(3), 25), + Membership::change_key(Origin::signed(3), 25), Error::::NotMember ); assert_noop!( - Membership::change_key(RuntimeOrigin::signed(10), 20), + Membership::change_key(Origin::signed(10), 20), Error::::AlreadyMember ); - assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 40)); + assert_ok!(Membership::change_key(Origin::signed(10), 40)); assert_eq!(Membership::members(), vec![20, 30, 40]); 
assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), Some(40)); @@ -753,7 +733,7 @@ mod tests { #[test] fn change_key_works_that_does_not_change_order() { new_test_ext().execute_with(|| { - assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 5)); + assert_ok!(Membership::change_key(Origin::signed(10), 5)); assert_eq!(Membership::members(), vec![5, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); }); @@ -762,19 +742,19 @@ mod tests { #[test] fn reset_members_works() { new_test_ext().execute_with(|| { - assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_noop!( - Membership::reset_members(RuntimeOrigin::signed(1), bounded_vec![20, 40, 30]), + Membership::reset_members(Origin::signed(1), bounded_vec![20, 40, 30]), BadOrigin ); - assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![20, 40, 30])); + assert_ok!(Membership::reset_members(Origin::signed(4), vec![20, 40, 30])); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), Some(20)); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![10, 40, 30])); + assert_ok!(Membership::reset_members(Origin::signed(4), vec![10, 40, 30])); assert_eq!(Membership::members(), vec![10, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), None); diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs index 5b8735aa2bac9..b3b52751d9598 100644 --- a/frame/membership/src/migrations/v4.rs +++ b/frame/membership/src/migrations/v4.rs @@ -46,7 +46,7 @@ pub fn migrate::on_chain_storage_version(); @@ -71,7 +71,7 @@ pub fn migrate WeightInfo for 
SubstrateWeight { // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { - // Minimum execution time: 23_796 nanoseconds. - Weight::from_ref_time(24_829_996 as u64) - // Standard Error: 723 - .saturating_add(Weight::from_ref_time(48_467 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (15_318_000 as Weight) + // Standard Error: 0 + .saturating_add((51_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { - // Minimum execution time: 27_255 nanoseconds. - Weight::from_ref_time(28_234_490 as u64) - // Standard Error: 833 - .saturating_add(Weight::from_ref_time(34_894 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (18_005_000 as Weight) + // Standard Error: 0 + .saturating_add((45_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[2, 100]`. 
fn swap_member(m: u32, ) -> Weight { - // Minimum execution time: 26_626 nanoseconds. - Weight::from_ref_time(27_989_042 as u64) - // Standard Error: 729 - .saturating_add(Weight::from_ref_time(51_567 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (18_029_000 as Weight) + // Standard Error: 0 + .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { - // Minimum execution time: 25_412 nanoseconds. - Weight::from_ref_time(27_713_414 as u64) - // Standard Error: 883 - .saturating_add(Weight::from_ref_time(157_085 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (18_105_000 as Weight) + // Standard Error: 0 + .saturating_add((158_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:1) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { - // Minimum execution time: 27_122 nanoseconds. 
- Weight::from_ref_time(28_477_394 as u64) - // Standard Error: 801 - .saturating_add(Weight::from_ref_time(56_383 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (18_852_000 as Weight) + // Standard Error: 0 + .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { - // Minimum execution time: 9_368 nanoseconds. - Weight::from_ref_time(10_133_132 as u64) - // Standard Error: 366 - .saturating_add(Weight::from_ref_time(17_708 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (4_869_000 as Weight) + // Standard Error: 0 + .saturating_add((28_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn clear_prime(m: u32, ) -> Weight { - // Minimum execution time: 5_546 nanoseconds. 
- Weight::from_ref_time(5_962_740 as u64) - // Standard Error: 186 - .saturating_add(Weight::from_ref_time(2_096 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (1_593_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -158,91 +141,77 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { - // Minimum execution time: 23_796 nanoseconds. - Weight::from_ref_time(24_829_996 as u64) - // Standard Error: 723 - .saturating_add(Weight::from_ref_time(48_467 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (15_318_000 as Weight) + // Standard Error: 0 + .saturating_add((51_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { - // Minimum execution time: 27_255 nanoseconds. 
- Weight::from_ref_time(28_234_490 as u64) - // Standard Error: 833 - .saturating_add(Weight::from_ref_time(34_894 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (18_005_000 as Weight) + // Standard Error: 0 + .saturating_add((45_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { - // Minimum execution time: 26_626 nanoseconds. - Weight::from_ref_time(27_989_042 as u64) - // Standard Error: 729 - .saturating_add(Weight::from_ref_time(51_567 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (18_029_000 as Weight) + // Standard Error: 0 + .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { - // Minimum execution time: 25_412 nanoseconds. 
- Weight::from_ref_time(27_713_414 as u64) - // Standard Error: 883 - .saturating_add(Weight::from_ref_time(157_085 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (18_105_000 as Weight) + // Standard Error: 0 + .saturating_add((158_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalMembership Prime (r:1 w:1) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { - // Minimum execution time: 27_122 nanoseconds. - Weight::from_ref_time(28_477_394 as u64) - // Standard Error: 801 - .saturating_add(Weight::from_ref_time(56_383 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (18_852_000 as Weight) + // Standard Error: 0 + .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { - // Minimum execution time: 9_368 nanoseconds. 
- Weight::from_ref_time(10_133_132 as u64) - // Standard Error: 366 - .saturating_add(Weight::from_ref_time(17_708 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (4_869_000 as Weight) + // Standard Error: 0 + .saturating_add((28_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. fn clear_prime(m: u32, ) -> Weight { - // Minimum execution time: 5_546 nanoseconds. - Weight::from_ref_time(5_962_740 as u64) - // Standard Error: 186 - .saturating_add(Weight::from_ref_time(2_096 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (1_593_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 9a3ee517e7d42..75301afed0094 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } +mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.3.2", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } @@ -25,15 +25,15 @@ 
sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -array-bytes = "4.1" env_logger = "0.9" +hex-literal = "0.3" itertools = "0.10.3" [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "mmr-lib/std", diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index eb2e1e8b53d9e..c7d9662904747 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -21,7 +21,6 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle-mountain-range" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } -anyhow = "1" [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 8476d82f3e70d..75032d40f492a 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -22,7 +22,7 @@ use std::{marker::PhantomData, sync::Arc}; -use codec::{Codec, Decode, Encode}; +use codec::{Codec, Encode}; use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, @@ -30,36 +30,61 @@ use jsonrpsee::{ }; use serde::{Deserialize, Serialize}; -use sp_api::{NumberFor, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_mmr_primitives::{Error as MmrError, Proof}; +use sp_mmr_primitives::{BatchProof, Error as MmrError, LeafIndex, Proof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; const RUNTIME_ERROR: i32 = 
8000; const MMR_ERROR: i32 = 8010; +const LEAF_NOT_FOUND_ERROR: i32 = MMR_ERROR + 1; +const GENERATE_PROOF_ERROR: i32 = MMR_ERROR + 2; + +/// Retrieved MMR leaf and its proof. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct LeafProof { + /// Block hash the proof was generated for. + pub block_hash: BlockHash, + /// SCALE-encoded leaf data. + pub leaf: Bytes, + /// SCALE-encoded proof data. See [sp_mmr_primitives::Proof]. + pub proof: Bytes, +} + +impl LeafProof { + /// Create new `LeafProof` from given concrete `leaf` and `proof`. + pub fn new(block_hash: BlockHash, leaf: Leaf, proof: Proof) -> Self + where + Leaf: Encode, + MmrHash: Encode, + { + Self { block_hash, leaf: Bytes(leaf.encode()), proof: Bytes(proof.encode()) } + } +} /// Retrieved MMR leaves and their proof. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(rename_all = "camelCase")] -pub struct LeavesProof { +pub struct LeafBatchProof { /// Block hash the proof was generated for. pub block_hash: BlockHash, /// SCALE-encoded vector of `LeafData`. pub leaves: Bytes, - /// SCALE-encoded proof data. See [sp_mmr_primitives::Proof]. + /// SCALE-encoded proof data. See [sp_mmr_primitives::BatchProof]. pub proof: Bytes, } -impl LeavesProof { - /// Create new `LeavesProof` from a given vector of `Leaf` and a - /// [sp_mmr_primitives::Proof]. +impl LeafBatchProof { + /// Create new `LeafBatchProof` from a given vector of `Leaf` and a + /// [sp_mmr_primitives::BatchProof]. pub fn new( block_hash: BlockHash, leaves: Vec, - proof: Proof, + proof: BatchProof, ) -> Self where Leaf: Encode, @@ -71,59 +96,38 @@ impl LeavesProof { /// MMR RPC methods. #[rpc(client, server)] -pub trait MmrApi { - /// Get the MMR root hash for the current best block. - #[method(name = "mmr_root")] - fn mmr_root(&self, at: Option) -> RpcResult; - - /// Generate an MMR proof for the given `block_numbers`. 
+pub trait MmrApi { + /// Generate MMR proof for given leaf index. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// an MMR proof for the set of blocks that have the given `block_numbers` with the MMR root at - /// `best_known_block_number`. `best_known_block_number` must be larger than all the - /// `block_numbers` for the function to succeed. + /// MMR proof for leaf at given `leaf_index`. + /// Optionally, a block hash at which the runtime should be queried can be specified. /// - /// Optionally via `at`, a block hash at which the runtime should be queried can be specified. - /// Optionally via `best_known_block_number`, the proof can be generated using the MMR's state - /// at a specific best block. Note that if `best_known_block_number` is provided, then also - /// specifying the block hash via `at` isn't super-useful here, unless you're generating proof - /// using non-finalized blocks where there are several competing forks. That's because MMR state - /// will be fixed to the state with `best_known_block_number`, which already points to - /// some historical block. - /// - /// Returns the (full) leaves and a proof for these leaves (compact encoding, i.e. hash of - /// the leaves). Both parameters are SCALE-encoded. - /// The order of entries in the `leaves` field of the returned struct - /// is the same as the order of the entries in `block_numbers` supplied + /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of + /// the leaf). Both parameters are SCALE-encoded. #[method(name = "mmr_generateProof")] fn generate_proof( &self, - block_numbers: Vec, - best_known_block_number: Option, + leaf_index: LeafIndex, at: Option, - ) -> RpcResult>; + ) -> RpcResult>; - /// Verify an MMR `proof`. - /// - /// This method calls into a runtime with MMR pallet included and attempts to verify - /// an MMR proof. + /// Generate MMR proof for the given leaf indices. 
/// - /// Returns `true` if the proof is valid, else returns the verification error. - #[method(name = "mmr_verifyProof")] - fn verify_proof(&self, proof: LeavesProof) -> RpcResult; - - /// Verify an MMR `proof` statelessly given an `mmr_root`. - /// - /// This method calls into a runtime with MMR pallet included and attempts to verify - /// an MMR proof against a provided MMR root. + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// MMR proof for a set of leaves at the given `leaf_indices`. + /// Optionally, a block hash at which the runtime should be queried can be specified. /// - /// Returns `true` if the proof is valid, else returns the verification error. - #[method(name = "mmr_verifyProofStateless")] - fn verify_proof_stateless( + /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of + /// the leaves). Both parameters are SCALE-encoded. + /// The order of entries in the `leaves` field of the returned struct + /// is the same as the order of the entries in `leaf_indices` supplied + #[method(name = "mmr_generateBatchProof")] + fn generate_batch_proof( &self, - mmr_root: MmrHash, - proof: LeavesProof, - ) -> RpcResult; + leaf_indices: Vec, + at: Option, + ) -> RpcResult>; } /// MMR RPC methods. @@ -140,110 +144,72 @@ impl Mmr { } #[async_trait] -impl MmrApiServer<::Hash, NumberFor, MmrHash> - for Mmr +impl MmrApiServer<::Hash> for Mmr where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - Client::Api: MmrRuntimeApi>, + Client::Api: MmrRuntimeApi, MmrHash: Codec + Send + Sync + 'static, { - fn mmr_root(&self, at: Option<::Hash>) -> RpcResult { - let block_hash = at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. 
- self.client.info().best_hash); + fn generate_proof( + &self, + leaf_index: LeafIndex, + at: Option<::Hash>, + ) -> RpcResult> { let api = self.client.runtime_api(); - let mmr_root = api - .mmr_root(&BlockId::Hash(block_hash)) + let block_hash = at.unwrap_or_else(|| self.client.info().best_hash); + + let (leaf, proof) = api + .generate_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_index, + ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; - Ok(mmr_root) + + Ok(LeafProof::new(block_hash, leaf, proof)) } - fn generate_proof( + fn generate_batch_proof( &self, - block_numbers: Vec>, - best_known_block_number: Option>, + leaf_indices: Vec, at: Option<::Hash>, - ) -> RpcResult::Hash>> { + ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); let block_hash = at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash); let (leaves, proof) = api - .generate_proof_with_context( + .generate_batch_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - block_numbers, - best_known_block_number, + leaf_indices, ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; - Ok(LeavesProof::new(block_hash, leaves, proof)) - } - - fn verify_proof(&self, proof: LeavesProof<::Hash>) -> RpcResult { - let api = self.client.runtime_api(); - - let leaves = Decode::decode(&mut &proof.leaves.0[..]) - .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; - - let decoded_proof = Decode::decode(&mut &proof.proof.0[..]) - .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; - - api.verify_proof_with_context( - &BlockId::hash(proof.block_hash), - sp_core::ExecutionContext::OffchainCall(None), - leaves, - decoded_proof, - ) - .map_err(runtime_error_into_rpc_error)? 
- .map_err(mmr_error_into_rpc_error)?; - - Ok(true) - } - - fn verify_proof_stateless( - &self, - mmr_root: MmrHash, - proof: LeavesProof<::Hash>, - ) -> RpcResult { - let api = self.client.runtime_api(); - - let leaves = Decode::decode(&mut &proof.leaves.0[..]) - .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; - - let decoded_proof = Decode::decode(&mut &proof.proof.0[..]) - .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; - - api.verify_proof_stateless( - &BlockId::hash(proof.block_hash), - mmr_root, - leaves, - decoded_proof, - ) - .map_err(runtime_error_into_rpc_error)? - .map_err(mmr_error_into_rpc_error)?; - - Ok(true) + Ok(LeafBatchProof::new(block_hash, leaves, proof)) } } -/// Converts an mmr-specific error into a [`CallError`]. +/// Converts a mmr-specific error into a [`CallError`]. fn mmr_error_into_rpc_error(err: MmrError) -> CallError { - let error_code = MMR_ERROR + - match err { - MmrError::LeafNotFound => 1, - MmrError::GenerateProof => 2, - MmrError::Verify => 3, - MmrError::BlockNumToLeafIndex => 4, - MmrError::InvalidBestKnownBlock => 5, - _ => 0, - }; - - CallError::Custom(ErrorObject::owned(error_code, err.to_string(), Some(format!("{:?}", err)))) + let data = format!("{:?}", err); + match err { + MmrError::LeafNotFound => CallError::Custom(ErrorObject::owned( + LEAF_NOT_FOUND_ERROR, + "Leaf was not found", + Some(data), + )), + MmrError::GenerateProof => CallError::Custom(ErrorObject::owned( + GENERATE_PROOF_ERROR, + "Error while generating the proof", + Some(data), + )), + _ => CallError::Custom(ErrorObject::owned(MMR_ERROR, "Unexpected MMR error", Some(data))), + } } /// Converts a runtime trap into a [`CallError`]. 
@@ -265,12 +231,12 @@ mod tests { // given let leaf = vec![1_u8, 2, 3, 4]; let proof = Proof { - leaf_indices: vec![1], + leaf_index: 1, leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], }; - let leaf_proof = LeavesProof::new(H256::repeat_byte(0), vec![leaf], proof); + let leaf_proof = LeafProof::new(H256::repeat_byte(0), leaf, proof); // when let actual = serde_json::to_string(&leaf_proof).unwrap(); @@ -278,22 +244,21 @@ mod tests { // then assert_eq!( actual, - r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x041001020304","proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# + r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaf":"0x1001020304","proof":"0x010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# ); } #[test] - fn should_serialize_leaves_proof() { + fn should_serialize_leaf_batch_proof() { // given - let leaf_a = vec![1_u8, 2, 3, 4]; - let leaf_b = vec![2_u8, 2, 3, 4]; - let proof = Proof { - leaf_indices: vec![1, 2], + let leaf = vec![1_u8, 2, 3, 4]; + let proof = BatchProof { + leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], }; - let leaf_proof = LeavesProof::new(H256::repeat_byte(0), vec![leaf_a, leaf_b], proof); + let leaf_proof = LeafBatchProof::new(H256::repeat_byte(0), vec![leaf], proof); // when let actual = serde_json::to_string(&leaf_proof).unwrap(); @@ -301,19 +266,19 @@ mod tests { // then assert_eq!( actual, - 
r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x0810010203041002020304","proof":"0x080100000000000000020000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# + r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x041001020304","proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# ); } #[test] fn should_deserialize_leaf_proof() { // given - let expected = LeavesProof { + let expected = LeafProof { block_hash: H256::repeat_byte(0), - leaves: Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), + leaf: Bytes(vec![1_u8, 2, 3, 4].encode()), proof: Bytes( Proof { - leaf_indices: vec![1], + leaf_index: 1, leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], } @@ -322,10 +287,10 @@ mod tests { }; // when - let actual: LeavesProof = serde_json::from_str(r#"{ + let actual: LeafProof = serde_json::from_str(r#"{ "blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "leaves":"0x041001020304", - "proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" + "leaf":"0x1001020304", + "proof":"0x010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" }"#).unwrap(); // then @@ -333,14 +298,14 @@ mod tests { } #[test] - fn should_deserialize_leaves_proof() { + fn should_deserialize_leaf_batch_proof() { // given - let expected = LeavesProof { + let expected = LeafBatchProof { block_hash: H256::repeat_byte(0), - leaves: Bytes(vec![vec![1_u8, 2, 3, 4], vec![2_u8, 2, 3, 4]].encode()), + leaves: 
Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), proof: Bytes( - Proof { - leaf_indices: vec![1, 2], + BatchProof { + leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], } @@ -349,10 +314,10 @@ mod tests { }; // when - let actual: LeavesProof = serde_json::from_str(r#"{ + let actual: LeafBatchProof = serde_json::from_str(r#"{ "blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "leaves":"0x0810010203041002020304", - "proof":"0x080100000000000000020000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" + "leaves":"0x041001020304", + "proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" }"#).unwrap(); // then diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs index d24364a55f9e6..b698e432534d8 100644 --- a/frame/merkle-mountain-range/src/benchmarking.rs +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -17,8 +17,6 @@ //! Benchmarks for the MMR pallet. -#![cfg(feature = "runtime-benchmarks")] - use crate::*; use frame_benchmarking::benchmarks_instance_pallet; use frame_support::traits::OnInitialize; diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs index e513e2197f1c6..73d1963a42964 100644 --- a/frame/merkle-mountain-range/src/default_weights.rs +++ b/frame/merkle-mountain-range/src/default_weights.rs @@ -28,9 +28,9 @@ impl crate::WeightInfo for () { // Reading the parent hash. let leaf_weight = DbWeight::get().reads(1); // Blake2 hash cost. - let hash_weight = 2u64 * WEIGHT_PER_NANOS; + let hash_weight = 2 * WEIGHT_PER_NANOS; // No-op hook. 
- let hook_weight = Weight::zero(); + let hook_weight = 0; leaf_weight .saturating_add(hash_weight) diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index a2d42417ae5dc..4644ebcb7da1c 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -22,7 +22,7 @@ //! Details on Merkle Mountain Ranges (MMRs) can be found here: //! //! -//! The MMR pallet constructs an MMR from leaf data obtained on every block from +//! The MMR pallet constructs a MMR from leaf data obtained on every block from //! `LeafDataProvider`. MMR nodes are stored both in: //! - on-chain storage - hashes only; not full leaf content) //! - off-chain storage - via Indexing API we push full leaf content (and all internal nodes as @@ -50,20 +50,20 @@ //! //! Secondary use case is to archive historical data, but still be able to retrieve them on-demand //! if needed. For instance if parent block hashes are stored in the MMR it's possible at any point -//! in time to provide an MMR proof about some past block hash, while this data can be safely pruned +//! in time to provide a MMR proof about some past block hash, while this data can be safely pruned //! from on-chain storage. //! //! NOTE This pallet is experimental and not proven to work in production. #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use frame_support::{log, traits::Get, weights::Weight}; +use frame_support::weights::Weight; use sp_runtime::{ - traits::{self, CheckedSub, One, Saturating, UniqueSaturatedInto}, + traits::{self, One, Saturating}, SaturatedConversion, }; -#[cfg(feature = "runtime-benchmarks")] +#[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; mod default_weights; mod mmr; @@ -103,15 +103,6 @@ pub trait WeightInfo { fn on_initialize(peaks: NodeIndex) -> Weight; } -/// An MMR specific to the pallet. -type ModuleMmr = mmr::Mmr>; - -/// Leaf data. 
-type LeafOf = <>::LeafData as primitives::LeafDataProvider>::LeafData; - -/// Hashing used for the pallet. -pub(crate) type HashingOf = >::Hashing; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -175,7 +166,7 @@ pub mod pallet { /// Note that the leaf at each block MUST be unique. You may want to include a block hash or /// block number as an easiest way to ensure that. /// Also note that the leaf added by each block is expected to only reference data coming - /// from ancestor blocks (leaves are saved offchain using `(pos, parent_hash)` key to be + /// from ancestor blocks (leaves are saved offchain using `(parent_hash, pos)` key to be /// fork-resistant, as such conflicts could only happen on 1-block deep forks, which means /// two forks with identical line of ancestors compete to write the same offchain key, but /// that's fine as long as leaves only contain data coming from ancestors - conflicting @@ -221,22 +212,12 @@ pub mod pallet { let leaves = Self::mmr_leaves(); let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); let data = T::LeafData::leaf_data(); - // append new leaf to MMR let mut mmr: ModuleMmr = mmr::Mmr::new(leaves); - // MMR push never fails, but better safe than sorry. - if mmr.push(data).is_none() { - log::error!(target: "runtime::mmr", "MMR push failed"); - return T::WeightInfo::on_initialize(peaks_before) - } - // Update the size, `mmr.finalize()` should also never fail. 
- let (leaves, root) = match mmr.finalize() { - Ok((leaves, root)) => (leaves, root), - Err(e) => { - log::error!(target: "runtime::mmr", "MMR finalize failed: {:?}", e); - return T::WeightInfo::on_initialize(peaks_before) - }, - }; + mmr.push(data).expect("MMR push never fails."); + + // update the size + let (leaves, root) = mmr.finalize().expect("MMR finalize never fails."); >::on_new_root(&root); >::put(leaves); @@ -249,35 +230,21 @@ pub mod pallet { fn offchain_worker(n: T::BlockNumber) { use mmr::storage::{OffchainStorage, Storage}; - // The MMR nodes can be found in offchain db under either: - // - fork-unique keys `(prefix, pos, parent_hash)`, or, - // - "canonical" keys `(prefix, pos)`, - // depending on how many blocks in the past the node at position `pos` was - // added to the MMR. - // - // For the fork-unique keys, the MMR pallet depends on - // `frame_system::block_hash(parent_num)` mappings to find the relevant parent block - // hashes, so it is limited by `frame_system::BlockHashCount` in terms of how many - // historical forks it can track. Nodes added to MMR by block `N` can be found in - // offchain db at: - // - fork-unique keys `(prefix, pos, parent_hash)` when (`N` >= `latest_block` - - // `frame_system::BlockHashCount`); - // - "canonical" keys `(prefix, pos)` when (`N` < `latest_block` - - // `frame_system::BlockHashCount`); - // - // The offchain worker is responsible for maintaining the nodes' positions in - // offchain db as the chain progresses by moving a rolling window of the same size as - // `frame_system::block_hash` map, where nodes/leaves added by blocks that are just + // MMR pallet uses offchain storage to hold full MMR and leaves. + // The leaves are saved under fork-unique keys `(parent_hash, pos)`. + // MMR Runtime depends on `frame_system::block_hash(block_num)` mappings to find + // parent hashes for particular nodes or leaves. 
+ // This MMR offchain worker function moves a rolling window of the same size + // as `frame_system::block_hash` map, where nodes/leaves added by blocks that are just // about to exit the window are "canonicalized" so that their offchain key no longer - // depends on `parent_hash`. + // depends on `parent_hash` therefore on access to `frame_system::block_hash`. // // This approach works to eliminate fork-induced leaf collisions in offchain db, // under the assumption that no fork will be deeper than `frame_system::BlockHashCount` - // blocks: - // entries pertaining to block `N` where `N < current-BlockHashCount` are moved to a - // key based solely on block number. The only way to have collisions is if two - // competing forks are deeper than `frame_system::BlockHashCount` blocks and they - // both "canonicalize" their view of block `N` + // blocks (2400 blocks on Polkadot, Kusama, Rococo, etc): + // entries pertaining to block `N` where `N < current-2400` are moved to a key based + // solely on block number. The only way to have collisions is if two competing forks + // are deeper than 2400 blocks and they both "canonicalize" their view of block `N`. // Once a block is canonicalized, all MMR entries pertaining to sibling blocks from // other forks are pruned from offchain db. Storage::>::canonicalize_and_prune(n); @@ -285,17 +252,26 @@ pub mod pallet { } } +/// A MMR specific to the pallet. +type ModuleMmr = mmr::Mmr>; + +/// Leaf data. +type LeafOf = <>::LeafData as primitives::LeafDataProvider>::LeafData; + +/// Hashing used for the pallet. +pub(crate) type HashingOf = >::Hashing; + /// Stateless MMR proof verification for batch of leaves. /// -/// This function can be used to verify received MMR [primitives::Proof] (`proof`) +/// This function can be used to verify received MMR [primitives::BatchProof] (`proof`) /// for given leaves set (`leaves`) against a known MMR root hash (`root`). 
/// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::Proof]. +/// [primitives::BatchProof]. pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::Proof, + proof: primitives::BatchProof, ) -> Result<(), primitives::Error> where H: traits::Hash, @@ -314,32 +290,19 @@ impl, I: 'static> Pallet { /// /// This combination makes the offchain (key,value) entry resilient to chain forks. fn node_offchain_key( - pos: NodeIndex, parent_hash: ::Hash, + pos: NodeIndex, ) -> sp_std::prelude::Vec { - (T::INDEXING_PREFIX, pos, parent_hash).encode() + (T::INDEXING_PREFIX, parent_hash, pos).encode() } /// Build canonical offchain key for node `pos` in MMR. /// /// Used for nodes added by now finalized blocks. - /// Never read keys using `node_canon_offchain_key` unless you sure that - /// there's no `node_offchain_key` key in the storage. fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { (T::INDEXING_PREFIX, pos).encode() } - /// Return size of rolling window of leaves saved in offchain under fork-unique keys. - /// - /// Leaves outside this window are canonicalized. - /// Window size is `frame_system::BlockHashCount - 1` to make sure fork-unique keys - /// can be built using `frame_system::block_hash` map. - fn offchain_canonicalization_window() -> LeafIndex { - let window_size: LeafIndex = - ::BlockHashCount::get().unique_saturated_into(); - window_size.saturating_sub(1) - } - /// Provide the parent number for the block that added `leaf_index` to the MMR. fn leaf_index_to_parent_block_num( leaf_index: LeafIndex, @@ -355,62 +318,20 @@ impl, I: 'static> Pallet { .saturating_add(leaf_index.saturated_into()) } - /// Convert a `block_num` into a leaf index. 
- fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result - where - T: frame_system::Config, - { - // leaf_idx = (leaves_count - 1) - (current_block_num - block_num); - let best_block_num = >::block_number(); - let blocks_diff = best_block_num.checked_sub(&block_num).ok_or_else(|| { - primitives::Error::BlockNumToLeafIndex - .log_debug("The provided block_number is greater than the best block number.") - })?; - let blocks_diff_as_leaf_idx = blocks_diff.try_into().map_err(|_| { - primitives::Error::BlockNumToLeafIndex - .log_debug("The `blocks_diff` couldn't be converted to `LeafIndex`.") - })?; - - let leaf_idx = Self::mmr_leaves() - .checked_sub(1) - .and_then(|last_leaf_idx| last_leaf_idx.checked_sub(blocks_diff_as_leaf_idx)) - .ok_or_else(|| { - primitives::Error::BlockNumToLeafIndex - .log_debug("There aren't enough leaves in the chain.") - })?; - Ok(leaf_idx) - } - - /// Generate an MMR proof for the given `block_numbers`. - /// If `best_known_block_number = Some(n)`, this generates a historical proof for - /// the chain with head at height `n`. - /// Else it generates a proof for the MMR at the current block height. + /// Generate a MMR proof for the given `leaf_indices`. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. - pub fn generate_proof( - block_numbers: Vec, - best_known_block_number: Option, - ) -> Result<(Vec>, primitives::Proof<>::Hash>), primitives::Error> { - // check whether best_known_block_number provided, else use current best block - let best_known_block_number = - best_known_block_number.unwrap_or_else(|| >::block_number()); - - let leaves_count = - Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); - - // we need to translate the block_numbers into leaf indices. 
- let leaf_indices = block_numbers - .iter() - .map(|block_num| -> Result { - Self::block_num_to_leaf_index(*block_num) - }) - .collect::, _>>()?; - - let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); - mmr.generate_proof(leaf_indices) + pub fn generate_batch_proof( + leaf_indices: Vec, + ) -> Result< + (Vec>, primitives::BatchProof<>::Hash>), + primitives::Error, + > { + let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); + mmr.generate_batch_proof(leaf_indices) } /// Return the on-chain MMR root hash. @@ -426,7 +347,7 @@ impl, I: 'static> Pallet { /// or the proof is invalid. pub fn verify_leaves( leaves: Vec>, - proof: primitives::Proof<>::Hash>, + proof: primitives::BatchProof<>::Hash>, ) -> Result<(), primitives::Error> { if proof.leaf_count > Self::mmr_leaves() || proof.leaf_count == 0 || diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 1f5a5bdae380b..44e684c1bdcac 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -29,11 +29,11 @@ use sp_std::prelude::*; /// Stateless verification of the proof for a batch of leaves. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::Proof] +/// [primitives::BatchProof] pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::Proof, + proof: primitives::BatchProof, ) -> Result where H: sp_runtime::traits::Hash, @@ -60,7 +60,7 @@ where .map_err(|e| Error::Verify.log_debug(e)) } -/// A wrapper around an MMR library to expose limited functionality. +/// A wrapper around a MMR library to expose limited functionality. /// /// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) /// vs [Off-chain](crate::mmr::storage::OffchainStorage)). @@ -91,11 +91,11 @@ where /// Verify proof for a set of leaves. 
/// Note, the leaves should be sorted such that corresponding leaves and leaf indices have /// the same position in both the `leaves` vector and the `leaf_indices` vector contained in the - /// [primitives::Proof] + /// [primitives::BatchProof] pub fn verify_leaves_proof( &self, leaves: Vec, - proof: primitives::Proof<>::Hash>, + proof: primitives::BatchProof<>::Hash>, ) -> Result { let p = mmr_lib::MerkleProof::, Hasher, L>>::new( self.mmr.mmr_size(), @@ -163,10 +163,10 @@ where /// /// Proof generation requires all the nodes (or their hashes) to be available in the storage. /// (i.e. you can't run the function in the pruned storage). - pub fn generate_proof( + pub fn generate_batch_proof( &self, leaf_indices: Vec, - ) -> Result<(Vec, primitives::Proof<>::Hash>), Error> { + ) -> Result<(Vec, primitives::BatchProof<>::Hash>), Error> { let positions = leaf_indices .iter() .map(|index| mmr_lib::leaf_index_to_pos(*index)) @@ -184,7 +184,7 @@ where self.mmr .gen_proof(positions) .map_err(|e| Error::GenerateProof.log_error(e)) - .map(|p| primitives::Proof { + .map(|p| primitives::BatchProof { leaf_indices, leaf_count, items: p.proof_items().iter().map(|x| x.hash()).collect(), diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs index 19fb7b34382bd..04fdfa199e72b 100644 --- a/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/frame/merkle-mountain-range/src/mmr/mod.rs @@ -36,10 +36,10 @@ pub struct Hasher(sp_std::marker::PhantomData<(H, L)>); impl mmr_lib::Merge for Hasher { type Item = Node; - fn merge(left: &Self::Item, right: &Self::Item) -> mmr_lib::Result { + fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item { let mut concat = left.hash().as_ref().to_vec(); concat.extend_from_slice(right.hash().as_ref()); - Ok(Node::Hash(::hash(&concat))) + Node::Hash(::hash(&concat)) } } diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 
d16ca8cf1e5c8..8b623edf56957 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -15,13 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! An MMR storage implementation. +//! A MMR storage implementations. use codec::Encode; -use frame_support::log::{debug, error, trace}; +use frame_support::traits::Get; use mmr_lib::helper; use sp_core::offchain::StorageKind; use sp_io::{offchain, offchain_index}; +use sp_runtime::traits::UniqueSaturatedInto; use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] use sp_std::prelude::*; @@ -132,14 +133,15 @@ where // Effectively move a rolling window of fork-unique leaves. Once out of the window, leaves // are "canonicalized" in offchain by moving them under `Pallet::node_canon_offchain_key`. let leaves = NumberOfLeaves::::get(); - let window_size = Pallet::::offchain_canonicalization_window(); + let window_size = + ::BlockHashCount::get().unique_saturated_into(); if leaves >= window_size { // Move the rolling window towards the end of `block_num->hash` mappings available // in the runtime: we "canonicalize" the leaf at the end, let to_canon_leaf = leaves.saturating_sub(window_size); // and all the nodes added by that leaf. let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); - debug!( + frame_support::log::debug!( target: "runtime::mmr::offchain", "Nodes to canon for leaf {}: {:?}", to_canon_leaf, to_canon_nodes ); @@ -147,7 +149,7 @@ where let to_canon_block_num = Pallet::::leaf_index_to_parent_block_num(to_canon_leaf, leaves); // Only entries under this hash (retrieved from state on current canon fork) are to be - // persisted. All entries added by same block number on other forks will be cleared. + // persisted. All other entries added by same block number will be cleared. 
let to_canon_hash = >::block_hash(to_canon_block_num); Self::canonicalize_nodes_for_hash(&to_canon_nodes, to_canon_hash); @@ -157,7 +159,7 @@ where Self::prune_nodes_for_forks(&to_canon_nodes, forks); }) .unwrap_or_else(|| { - error!( + frame_support::log::error!( target: "runtime::mmr::offchain", "Offchain: could not prune: no entry in pruning map for block {:?}", to_canon_block_num @@ -169,8 +171,8 @@ where fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<::Hash>) { for hash in forks { for pos in nodes { - let key = Pallet::::node_offchain_key(*pos, hash); - debug!( + let key = Pallet::::node_offchain_key(hash, *pos); + frame_support::log::debug!( target: "runtime::mmr::offchain", "Clear elem at pos {} with key {:?}", pos, key @@ -185,19 +187,19 @@ where to_canon_hash: ::Hash, ) { for pos in to_canon_nodes { - let key = Pallet::::node_offchain_key(*pos, to_canon_hash); + let key = Pallet::::node_offchain_key(to_canon_hash, *pos); // Retrieve the element from Off-chain DB under fork-aware key. if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) { let canon_key = Pallet::::node_canon_offchain_key(*pos); // Add under new canon key. offchain::local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem); - debug!( + frame_support::log::debug!( target: "runtime::mmr::offchain", "Moved elem at pos {} from key {:?} to canon key {:?}", pos, key, canon_key ); } else { - error!( + frame_support::log::error!( target: "runtime::mmr::offchain", "Could not canonicalize elem at pos {} using key {:?}", pos, key @@ -218,18 +220,21 @@ where // Find out which leaf added node `pos` in the MMR. let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); - let window_size = Pallet::::offchain_canonicalization_window(); + let window_size = + ::BlockHashCount::get().unique_saturated_into(); // Leaves older than this window should have been canonicalized. 
if leaves.saturating_sub(ancestor_leaf_idx) > window_size { let key = Pallet::::node_canon_offchain_key(pos); - debug!( + frame_support::log::debug!( target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, key {:?}", pos, ancestor_leaf_idx, key ); // Just for safety, to easily handle runtime upgrades where any of the window params // change and maybe we mess up storage migration, // return _if and only if_ node is found (in normal conditions it's always found), - if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + if let Some(elem) = + sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + { return Ok(codec::Decode::decode(&mut &*elem).ok()) } // BUT if we DID MESS UP, fall through to searching node using fork-specific key. @@ -239,20 +244,20 @@ where let ancestor_parent_block_num = Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); - let key = Pallet::::node_offchain_key(pos, ancestor_parent_hash); - debug!( + let key = Pallet::::node_offchain_key(ancestor_parent_hash, pos); + frame_support::log::debug!( target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}", pos, ancestor_leaf_idx, ancestor_parent_hash, key ); // Retrieve the element from Off-chain DB. - Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) + Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) .or_else(|| { // Again, this is just us being extra paranoid. // We get here only if we mess up a storage migration for a runtime upgrades where // say the window is increased, and for a little while following the upgrade there's // leaves inside new 'window' that had been already canonicalized before upgrade. 
let key = Pallet::::node_canon_offchain_key(pos); - sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) + sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) }) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } @@ -277,8 +282,9 @@ where return Ok(()) } - trace!( - target: "runtime::mmr", "elems: {:?}", + frame_support::log::trace!( + target: "runtime::mmr", + "elems: {:?}", elems.iter().map(|elem| elem.hash()).collect::>() ); @@ -303,12 +309,25 @@ where // in offchain DB to avoid DB collisions and overwrites in case of forks. let parent_hash = >::parent_hash(); for elem in elems { + // For now we store this leaf offchain keyed by `(parent_hash, node_index)` + // to make it fork-resistant. + // Offchain worker task will "canonicalize" it `frame_system::BlockHashCount` blocks + // later when we are not worried about forks anymore (highly unlikely to have a fork + // in the chain that deep). + // "Canonicalization" in this case means moving this leaf under a new key based + // only on the leaf's `node_index`. + let key = Pallet::::node_offchain_key(parent_hash, node_index); + frame_support::log::debug!( + target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", + node_index, parent_hash, key + ); + // Indexing API is used to store the full node content (both leaf and inner). + elem.using_encoded(|elem| offchain_index::set(&key, elem)); + // On-chain we are going to only store new peaks. if peaks_to_store.next_if_eq(&node_index).is_some() { >::insert(node_index, elem.hash()); } - // We are storing full node off-chain (using indexing API). - Self::store_to_offchain(node_index, parent_hash, &elem); // Increase the indices. if let Node::Data(..) 
= elem { @@ -329,38 +348,6 @@ where } } -impl Storage -where - T: Config, - I: 'static, - L: primitives::FullLeaf, -{ - fn store_to_offchain( - pos: NodeIndex, - parent_hash: ::Hash, - node: &NodeOf, - ) { - let encoded_node = node.encode(); - // We store this leaf offchain keyed by `(parent_hash, node_index)` to make it - // fork-resistant. Offchain worker task will "canonicalize" it - // `frame_system::BlockHashCount` blocks later, when we are not worried about forks anymore - // (multi-era-deep forks should not happen). - let key = Pallet::::node_offchain_key(pos, parent_hash); - debug!( - target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", - pos, parent_hash, key - ); - // Indexing API is used to store the full node content. - offchain_index::set(&key, &encoded_node); - // We also directly save the full node under the "canonical" key. - // This is superfluous for the normal case - this entry will possibly be overwritten - // by forks, and will also be overwritten by "offchain_worker canonicalization". - // But it is required for blocks imported during initial sync where none of the above apply - // (`offchain_worker` doesn't run for initial sync blocks). - offchain_index::set(&Pallet::::node_canon_offchain_key(pos), &encoded_node); - } -} - fn peaks_to_prune_and_store( old_size: NodeIndex, new_size: NodeIndex, @@ -369,8 +356,8 @@ fn peaks_to_prune_and_store( // both collections may share a common prefix. 
let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) }; let peaks_after = helper::get_peaks(new_size); - trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before); - trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after); + frame_support::log::trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before); + frame_support::log::trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after); let mut peaks_before = peaks_before.into_iter().peekable(); let mut peaks_after = peaks_after.into_iter().peekable(); diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index 16f0922633088..b2b6821fcd054 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -19,17 +19,14 @@ use crate as pallet_mmr; use crate::*; use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - traits::{ConstU32, ConstU64}, -}; +use frame_support::traits::{ConstU32, ConstU64}; use sp_core::H256; use sp_mmr_primitives::{Compact, LeafDataProvider}; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup, Keccak256}, }; -use sp_std::prelude::*; +use sp_std::{cell::RefCell, prelude::*}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -47,8 +44,8 @@ frame_support::construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -56,7 +53,7 @@ impl frame_system::Config for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type BlockWeights = (); @@ -94,14 +91,14 @@ impl 
LeafData { } } -parameter_types! { - pub static LeafDataTestValue: LeafData = Default::default(); +thread_local! { + pub static LEAF_DATA: RefCell = RefCell::new(Default::default()); } impl LeafDataProvider for LeafData { type LeafData = Self; fn leaf_data() -> Self::LeafData { - LeafDataTestValue::get().clone() + LEAF_DATA.with(|r| r.borrow().clone()) } } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index e47f1b3b2e63a..566a051823d5e 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -27,7 +27,7 @@ use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, }; -use sp_mmr_primitives::{Compact, Proof}; +use sp_mmr_primitives::{BatchProof, Compact}; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() @@ -39,10 +39,10 @@ fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { ext.register_extension(OffchainWorkerExt::new(offchain)); } -fn new_block() -> Weight { +fn new_block() -> u64 { let number = frame_system::Pallet::::block_number() + 1; let hash = H256::repeat_byte(number as u8); - LeafDataTestValue::mutate(|r| r.a = number); + LEAF_DATA.with(|r| r.borrow_mut().a = number); frame_system::Pallet::::reset_events(); frame_system::Pallet::::initialize(&number, &hash, &Default::default()); @@ -110,7 +110,7 @@ fn should_start_empty() { crate::RootHash::::get(), hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0") ); - assert!(weight != Weight::zero()); + assert!(weight != 0); }); } @@ -169,22 +169,25 @@ fn should_append_to_mmr_when_on_initialize_is_called() { ext.persist_offchain_overlay(); let offchain_db = ext.offchain_db(); + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(parent_b1, 0)).map(decode_node), + Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),))) + ); + assert_eq!( + 
offchain_db.get(&MMR::node_offchain_key(parent_b2, 1)).map(decode_node), + Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),))) + ); + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(parent_b2, 2)).map(decode_node), + Some(mmr::Node::Hash(hex( + "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" + ))) + ); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_b2, 3)), None); - let expected = Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1)))); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(0, parent_b1)).map(decode_node), expected); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)).map(decode_node), expected); - - let expected = Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2)))); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(1, parent_b2)).map(decode_node), expected); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)).map(decode_node), expected); - - let expected = Some(mmr::Node::Hash(hex( - "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854", - ))); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(2, parent_b2)).map(decode_node), expected); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)).map(decode_node), expected); - - assert_eq!(offchain_db.get(&MMR::node_offchain_key(3, parent_b2)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)), None); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)), None); assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None); } @@ -224,35 +227,18 @@ fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - let num_blocks: u64 = 7; - ext.execute_with(|| add_blocks(num_blocks as usize)); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proofs now. 
This requires the offchain extensions to be present // to retrieve full leaf data. register_offchain_ext(&mut ext); ext.execute_with(|| { - let best_block_number = frame_system::Pallet::::block_number(); - // when generate proofs for all leaves. - let proofs = (1_u64..=best_block_number) - .into_iter() - .map(|block_num| crate::Pallet::::generate_proof(vec![block_num], None).unwrap()) - .collect::>(); - // when generate historical proofs for all leaves - let historical_proofs = (1_u64..best_block_number) + // when generate proofs for all leaves + let proofs = (0_u64..crate::NumberOfLeaves::::get()) .into_iter() - .map(|block_num| { - let mut proofs = vec![]; - for historical_best_block in block_num..=num_blocks { - proofs.push( - crate::Pallet::::generate_proof( - vec![block_num], - Some(historical_best_block), - ) - .unwrap(), - ) - } - proofs + .map(|leaf_index| { + crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() }) .collect::>(); @@ -261,7 +247,7 @@ fn should_generate_proofs_correctly() { proofs[0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - Proof { + BatchProof { leaf_indices: vec![0], leaf_count: 7, items: vec![ @@ -272,85 +258,11 @@ fn should_generate_proofs_correctly() { } ) ); - assert_eq!( - historical_proofs[0][0], - ( - vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - Proof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } - ) - ); - - // D - // / \ - // / \ - // A B C - // / \ / \ / \ - // 1 2 3 4 5 6 7 - // - // we're proving 3 => we need { 4, A, C++7 } - assert_eq!( - proofs[2], - ( - vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { - leaf_indices: vec![2], - leaf_count: 7, - items: vec![ - hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), - hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), - hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), 
- ], - } - ) - ); - // A - // / \ - // 1 2 3 - // - // we're proving 3 => we need { A } - assert_eq!( - historical_proofs[2][0], - ( - vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { - leaf_indices: vec![2], - leaf_count: 3, - items: vec![hex( - "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" - )], - } - ) - ); - // D - // / \ - // / \ - // A B - // / \ / \ - // 1 2 3 4 5 - // we're proving 3 => we need { 4, A, 5 } - assert_eq!( - historical_proofs[2][2], - ( - vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { - leaf_indices: vec![2], - leaf_count: 5, - items: vec![ - hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), - hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), - hex("3b031d22e24f1126c8f7d2f394b663f9b960ed7abbedb7152e17ce16112656d0") - ], - } - ) - ); - assert_eq!(historical_proofs[2][4], proofs[2]); - assert_eq!( proofs[4], ( - // NOTE: the leaf index is equivalent to the block number(in this case 5) - 1 vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - Proof { + BatchProof { leaf_indices: vec![4], leaf_count: 7, items: vec![ @@ -361,26 +273,11 @@ fn should_generate_proofs_correctly() { } ) ); - assert_eq!( - historical_proofs[4][0], - ( - vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - Proof { - leaf_indices: vec![4], - leaf_count: 5, - items: vec![hex( - "ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252" - ),], - } - ) - ); - assert_eq!(historical_proofs[4][2], proofs[4]); - assert_eq!( proofs[6], ( vec![Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),))], - Proof { + BatchProof { leaf_indices: vec![6], leaf_count: 7, items: vec![ @@ -390,7 +287,6 @@ fn should_generate_proofs_correctly() { } ) ); - assert_eq!(historical_proofs[5][1], proofs[5]); }); } @@ -406,13 +302,13 @@ fn 
should_generate_batch_proof_correctly() { // to retrieve full leaf data. register_offchain_ext(&mut ext); ext.execute_with(|| { - // when generate proofs for a batch of leaves - let (.., proof) = crate::Pallet::::generate_proof(vec![1, 5, 6], None).unwrap(); + // when generate proofs for all leaves + let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); + // then assert_eq!( proof, - Proof { - // the leaf indices are equivalent to the above specified block numbers - 1. + BatchProof { leaf_indices: vec![0, 4, 5], leaf_count: 7, items: vec![ @@ -422,28 +318,6 @@ fn should_generate_batch_proof_correctly() { ], } ); - - // when generate historical proofs for a batch of leaves - let (.., historical_proof) = - crate::Pallet::::generate_proof(vec![1, 5, 6], Some(6)).unwrap(); - // then - assert_eq!( - historical_proof, - Proof { - leaf_indices: vec![0, 4, 5], - leaf_count: 6, - items: vec![ - hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), - hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), - ], - } - ); - - // when generate historical proofs for a batch of leaves - let (.., historical_proof) = - crate::Pallet::::generate_proof(vec![1, 5, 6], None).unwrap(); - // then - assert_eq!(historical_proof, proof); }); } @@ -462,35 +336,13 @@ fn should_verify() { register_offchain_ext(&mut ext); let (leaves, proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_proof(vec![5], None).unwrap() - }); - let (simple_historical_leaves, simple_historical_proof5) = ext.execute_with(|| { - // when - crate::Pallet::::generate_proof(vec![5], Some(6)).unwrap() - }); - let (advanced_historical_leaves, advanced_historical_proof5) = ext.execute_with(|| { - // when - crate::Pallet::::generate_proof(vec![5], Some(7)).unwrap() + crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); ext.execute_with(|| { add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); - assert_eq!( 
- crate::Pallet::::verify_leaves( - simple_historical_leaves, - simple_historical_proof5 - ), - Ok(()) - ); - assert_eq!( - crate::Pallet::::verify_leaves( - advanced_historical_leaves, - advanced_historical_proof5 - ), - Ok(()) - ); }); } @@ -498,36 +350,16 @@ fn should_verify() { fn should_verify_batch_proofs() { fn generate_and_verify_batch_proof( ext: &mut sp_io::TestExternalities, - block_numbers: &Vec, + leaves: &Vec, blocks_to_add: usize, ) { - let (leaves, proof) = ext.execute_with(|| { - crate::Pallet::::generate_proof(block_numbers.to_vec(), None).unwrap() - }); - - let max_block_number = ext.execute_with(|| frame_system::Pallet::::block_number()); - let min_block_number = block_numbers.iter().max().unwrap(); - - // generate all possible historical proofs for the given blocks - let historical_proofs = (*min_block_number..=max_block_number) - .map(|best_block| { - ext.execute_with(|| { - crate::Pallet::::generate_proof(block_numbers.to_vec(), Some(best_block)) - .unwrap() - }) - }) - .collect::>(); + let (leaves, proof) = ext + .execute_with(|| crate::Pallet::::generate_batch_proof(leaves.to_vec()).unwrap()); ext.execute_with(|| { add_blocks(blocks_to_add); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); - historical_proofs.iter().for_each(|(leaves, proof)| { - assert_eq!( - crate::Pallet::::verify_leaves(leaves.clone(), proof.clone()), - Ok(()) - ); - }); }) } @@ -540,41 +372,39 @@ fn should_verify_batch_proofs() { // to retrieve full leaf data when generating proofs register_offchain_ext(&mut ext); - // verify that up to n=10, valid proofs are generated for all possible block number - // combinations. - for n in 1..=10 { + // verify that up to n=10, valid proofs are generated for all possible leaf combinations + for n in 0..10 { ext.execute_with(|| new_block()); ext.persist_offchain_overlay(); - // generate powerset (skipping empty set) of all possible block number combinations for mmr - // size n. 
- let blocks_set: Vec> = (1..=n).into_iter().powerset().skip(1).collect(); + // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n + let leaves_set: Vec> = (0..n).into_iter().powerset().skip(1).collect(); - blocks_set.iter().for_each(|blocks_subset| { - generate_and_verify_batch_proof(&mut ext, &blocks_subset, 0); + leaves_set.iter().for_each(|leaves_subset| { + generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); ext.persist_offchain_overlay(); }); } - // verify that up to n=15, valid proofs are generated for all possible 2-block number - // combinations. - for n in 11..=15 { + // verify that up to n=15, valid proofs are generated for all possible 2-leaf combinations + for n in 10..15 { + // (MMR Leafs) ext.execute_with(|| new_block()); ext.persist_offchain_overlay(); - // generate all possible 2-block number combinations for mmr size n. - let blocks_set: Vec> = (1..=n).into_iter().combinations(2).collect(); + // generate all possible 2-leaf combinations for mmr size n + let leaves_set: Vec> = (0..n).into_iter().combinations(2).collect(); - blocks_set.iter().for_each(|blocks_subset| { - generate_and_verify_batch_proof(&mut ext, &blocks_subset, 0); + leaves_set.iter().for_each(|leaves_subset| { + generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); ext.persist_offchain_overlay(); }); } - generate_and_verify_batch_proof(&mut ext, &vec![8, 12], 20); + generate_and_verify_batch_proof(&mut ext, &vec![7, 11], 20); ext.execute_with(|| add_blocks(1000)); ext.persist_offchain_overlay(); - generate_and_verify_batch_proof(&mut ext, &vec![8, 12, 100, 800], 100); + generate_and_verify_batch_proof(&mut ext, &vec![7, 11, 100, 800], 100); } #[test] @@ -584,13 +414,7 @@ fn verification_should_be_stateless() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - let (root_6, root_7) = ext.execute_with(|| { - add_blocks(6); - let root_6 = 
crate::Pallet::::mmr_root_hash(); - add_blocks(1); - let root_7 = crate::Pallet::::mmr_root_hash(); - (root_6, root_7) - }); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -598,29 +422,14 @@ fn verification_should_be_stateless() { register_offchain_ext(&mut ext); let (leaves, proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_proof(vec![5], None).unwrap() - }); - let (_, historical_proof5) = ext.execute_with(|| { - // when - crate::Pallet::::generate_proof(vec![5], Some(6)).unwrap() + crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); + let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); // Verify proof without relying on any on-chain data. let leaf = crate::primitives::DataOrHash::Data(leaves[0].clone()); assert_eq!( - crate::verify_leaves_proof::<::Hashing, _>( - root_7, - vec![leaf.clone()], - proof5 - ), - Ok(()) - ); - assert_eq!( - crate::verify_leaves_proof::<::Hashing, _>( - root_6, - vec![leaf], - historical_proof5 - ), + crate::verify_leaves_proof::<::Hashing, _>(root, vec![leaf], proof5), Ok(()) ); } @@ -632,13 +441,7 @@ fn should_verify_batch_proof_statelessly() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - let (root_6, root_7) = ext.execute_with(|| { - add_blocks(6); - let root_6 = crate::Pallet::::mmr_root_hash(); - add_blocks(1); - let root_7 = crate::Pallet::::mmr_root_hash(); - (root_6, root_7) - }); + ext.execute_with(|| add_blocks(7)); ext.persist_offchain_overlay(); // Try to generate proof now. 
This requires the offchain extensions to be present @@ -646,17 +449,14 @@ fn should_verify_batch_proof_statelessly() { register_offchain_ext(&mut ext); let (leaves, proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_proof(vec![1, 4, 5], None).unwrap() - }); - let (historical_leaves, historical_proof) = ext.execute_with(|| { - // when - crate::Pallet::::generate_proof(vec![1, 4, 5], Some(6)).unwrap() + crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); + let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); // Verify proof without relying on any on-chain data. assert_eq!( crate::verify_leaves_proof::<::Hashing, _>( - root_7, + root, leaves .into_iter() .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) @@ -665,17 +465,6 @@ fn should_verify_batch_proof_statelessly() { ), Ok(()) ); - assert_eq!( - crate::verify_leaves_proof::<::Hashing, _>( - root_6, - historical_leaves - .into_iter() - .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) - .collect(), - historical_proof - ), - Ok(()) - ); } #[test] @@ -690,7 +479,7 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { ext.execute_with(|| { // when - let (leaves, proof5) = crate::Pallet::::generate_proof(vec![5], None).unwrap(); + let (leaves, proof5) = crate::Pallet::::generate_batch_proof(vec![5]).unwrap(); new_block(); // then @@ -808,20 +597,16 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = (block_num - 1).into(); let leaf_index = u64::from(block_num - 1); let pos = helper::leaf_index_to_pos(leaf_index.into()); + // not canon, + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); let parent_hash = >::block_hash(parent_num); - // Available in offchain db under both fork-proof key and canon key. - // We'll later check it is pruned from fork-proof key. 
- let expected = Some(mmr::Node::Data(( - (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), - LeafData::new(block_num.into()), - ))); - assert_eq!( - offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), - expected - ); + // but available in fork-proof storage. assert_eq!( - offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node), - expected + offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), + Some(mmr::Node::Data(( + (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), + LeafData::new(block_num.into()), + ))) ); } @@ -832,16 +617,12 @@ fn should_canonicalize_offchain() { let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { let parent_num: BlockNumber = leaf_index.try_into().unwrap(); let parent_hash = >::block_hash(parent_num); - // Available in offchain db under both fork-proof key and canon key. - // We'll later check it is pruned from fork-proof key. - let expected = Some(mmr::Node::Hash(expected)); + // not canon, + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); + // but available in fork-proof storage. assert_eq!( - offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), - expected - ); - assert_eq!( - offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node), - expected + offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), + Some(mmr::Node::Hash(expected)) ); }; verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); @@ -868,7 +649,7 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = (block_num - 1).into(); let parent_hash = >::block_hash(parent_num); // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); // but available using canon key. 
assert_eq!( offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), @@ -887,7 +668,7 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = leaf_index.try_into().unwrap(); let parent_hash = >::block_hash(parent_num); // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); // but available using canon key. assert_eq!( offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), @@ -924,7 +705,7 @@ fn should_verify_canonicalized() { // Generate proofs for some blocks. let (leaves, proofs) = - ext.execute_with(|| crate::Pallet::::generate_proof(vec![1, 4, 5, 7], None).unwrap()); + ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![0, 4, 5, 7]).unwrap()); // Verify all previously generated proofs. ext.execute_with(|| { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); @@ -932,7 +713,7 @@ fn should_verify_canonicalized() { // Generate proofs for some new blocks. let (leaves, proofs) = ext.execute_with(|| { - crate::Pallet::::generate_proof(vec![block_hash_size + 7], None).unwrap() + crate::Pallet::::generate_batch_proof(vec![block_hash_size + 7]).unwrap() }); // Add some more blocks then verify all previously generated proofs. ext.execute_with(|| { @@ -940,36 +721,3 @@ fn should_verify_canonicalized() { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); }); } - -#[test] -fn does_not_panic_when_generating_historical_proofs() { - let _ = env_logger::try_init(); - let mut ext = new_test_ext(); - - // given 7 blocks (7 MMR leaves) - ext.execute_with(|| add_blocks(7)); - ext.persist_offchain_overlay(); - - // Try to generate historical proof with invalid arguments. This requires the offchain - // extensions to be present to retrieve full leaf data. 
- register_offchain_ext(&mut ext); - ext.execute_with(|| { - // when leaf index is invalid - assert_eq!( - crate::Pallet::::generate_proof(vec![10], None), - Err(Error::BlockNumToLeafIndex), - ); - - // when leaves count is invalid - assert_eq!( - crate::Pallet::::generate_proof(vec![3], Some(100)), - Err(Error::BlockNumToLeafIndex), - ); - - // when both leaf index and leaves count are invalid - assert_eq!( - crate::Pallet::::generate_proof(vec![10], Some(100)), - Err(Error::BlockNumToLeafIndex), - ); - }); -} diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index bfd0870d30c22..a370215032714 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -22,9 +22,6 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } -# third party -log = { version = "0.4.17", default-features = false } - [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-core = { version = "6.0.0", path = "../../primitives/core" } @@ -32,7 +29,6 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index d5faf9ae8ac1a..8201426f5330f 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -31,7 +31,7 @@ const SEED: u32 = 0; fn setup_multi( s: u32, z: u32, -) -> Result<(Vec, Box<::RuntimeCall>), &'static str> { +) -> Result<(Vec, OpaqueCall), &'static str> { let mut signatories: Vec = Vec::new(); for i in 0..s { let signatory = account("signatory", i, SEED); @@ -41,10 +41,11 @@ fn setup_multi( signatories.push(signatory); } signatories.sort(); - // Must first convert to 
runtime call type. - let call: ::RuntimeCall = + // Must first convert to outer call type. + let call: ::Call = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); - Ok((signatories, Box::new(call))) + let call_data = OpaqueCall::::from_encoded(call.encode()); + Ok((signatories, call_data)) } benchmarks! { @@ -53,7 +54,7 @@ benchmarks! { let z in 0 .. 10_000; let max_signatories = T::MaxSignatories::get().into(); let (mut signatories, _) = setup_multi::(max_signatories, z)?; - let call: ::RuntimeCall = frame_system::Call::::remark { + let call: ::Call = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_hash = call.using_encoded(blake2_256); @@ -69,104 +70,151 @@ benchmarks! { as_multi_create { // Signatories, need at least 2 total people - let s in 2 .. T::MaxSignatories::get(); + let s in 2 .. T::MaxSignatories::get() as u32; // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(call.encoded()); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, 0) + verify { + assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + assert!(!Calls::::contains_key(call_hash)); + } + + as_multi_create_store { + // Signatories, need at least 2 total people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 
10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, Weight::zero()) + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, 0) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + assert!(Calls::::contains_key(call_hash)); } as_multi_approve { // Signatories, need at least 3 people (so we don't complete the multisig) - let s in 3 .. T::MaxSignatories::get(); + let s in 3 .. T::MaxSignatories::get() as u32; // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; + // Create the multi, storing for worst case + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + assert!(Calls::::contains_key(call_hash)); + let caller2 = signatories2.remove(0); + // Whitelist caller account from further DB operations. 
+ let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, 0) + verify { + let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; + assert_eq!(multisig.approvals.len(), 2); + } + + as_multi_approve_store { + // Signatories, need at least 3 people (so we don't complete the multisig) + let s in 3 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(call.encoded()); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // before the call, get the timepoint + let timepoint = Multisig::::timepoint(); + // Create the multi, not storing + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, 0)?; + assert!(!Calls::::contains_key(call_hash)); let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::zero()) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, 0) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); + assert!(Calls::::contains_key(call_hash)); } as_multi_complete { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get(); + let s in 2 .. 
T::MaxSignatories::get() as u32; // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; + // Create the multi, storing it for worst case + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), Weight::zero())?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::MAX) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::max_value()) verify { assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); } approve_as_multi_create { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get(); + let s in 2 .. 
T::MaxSignatories::get() as u32; // Transaction Length, not a component let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(call.encoded()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); // Create the multi - }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, Weight::zero()) + }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, 0) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); } approve_as_multi_approve { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get(); + let s in 2 .. T::MaxSignatories::get() as u32; // Transaction Length, not a component let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let mut signatories2 = signatories.clone(); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(call.encoded()); // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi @@ -176,38 +224,80 @@ benchmarks! { signatories, None, call, - Weight::zero() + false, + 0 )?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. 
let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, Weight::zero()) + }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, 0) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); } + approve_as_multi_complete { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length, not a component + let z = 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let call_hash = blake2_256(call.encoded()); + // before the call, get the timepoint + let timepoint = Multisig::::timepoint(); + // Create the multi + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + // Everyone except the first person approves + for i in 1 .. s - 1 { + let mut signatories_loop = signatories2.clone(); + let caller_loop = signatories_loop.remove(i as usize); + let o = RawOrigin::Signed(caller_loop).into(); + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; + } + let caller2 = signatories2.remove(0); + assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + // Whitelist caller account from further DB operations. 
+ let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: approve_as_multi( + RawOrigin::Signed(caller2), + s as u16, + signatories2, + Some(timepoint), + call_hash, + Weight::max_value() + ) + verify { + assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); + } + cancel_as_multi { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get(); + let s in 2 .. T::MaxSignatories::get() as u32; // Transaction Length, not a component let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(call.encoded()); let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, Weight::zero())?; + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, 0)?; assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + assert!(Calls::::contains_key(call_hash)); // Whitelist caller account from further DB operations. 
let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) verify { assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); + assert!(!Calls::::contains_key(call_hash)); } impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index ae4efb76335a0..d4ea041e5820e 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -47,20 +47,18 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; -pub mod migrations; mod tests; pub mod weights; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use frame_support::{ dispatch::{ - DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, - PostDispatchInfo, + DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo, }, ensure, - traits::{Currency, Get, ReservableCurrency}, - weights::Weight, - BoundedVec, RuntimeDebug, + traits::{Currency, Get, ReservableCurrency, WrapperKeepOpaque}, + weights::{GetDispatchInfo, Weight}, + RuntimeDebug, }; use frame_system::{self as system, RawOrigin}; use scale_info::TypeInfo; @@ -74,29 +72,13 @@ pub use weights::WeightInfo; pub use pallet::*; -/// The log target of this pallet. -pub const LOG_TARGET: &'static str = "runtime::multisig"; - -// syntactic sugar for logging. -#[macro_export] -macro_rules! log { - ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { - log::$level!( - target: crate::LOG_TARGET, - concat!("[{:?}] ✍️ ", $patter), >::block_number() $(, $values)* - ) - }; -} - type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. 
This allows a transaction in which a multisig operation of a particular /// composite was created to be uniquely identified. -#[derive( - Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen, -)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct Timepoint { /// The height of the chain at the point in time. height: BlockNumber, @@ -105,12 +87,8 @@ pub struct Timepoint { } /// An open multisig operation. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] -#[scale_info(skip_type_params(MaxApprovals))] -pub struct Multisig -where - MaxApprovals: Get, -{ +#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] +pub struct Multisig { /// The extrinsic when the multisig operation was opened. when: Timepoint, /// The amount held in reserve of the `depositor`, to be returned once the operation ends. @@ -118,13 +96,15 @@ where /// The account who opened it (i.e. the first to approve it). depositor: AccountId, /// The approvals achieved so far, including the depositor. Always sorted. - approvals: BoundedVec, + approvals: Vec, } +type OpaqueCall = WrapperKeepOpaque<::Call>; + type CallHash = [u8; 32]; enum CallOrHash { - Call(::RuntimeCall), + Call(OpaqueCall, bool), Hash([u8; 32]), } @@ -137,11 +117,11 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The overarching call type. - type RuntimeCall: Parameter - + Dispatchable + type Call: Parameter + + Dispatchable + GetDispatchInfo + From>; @@ -165,18 +145,15 @@ pub mod pallet { /// The maximum amount of signatories allowed in the multisig. #[pallet::constant] - type MaxSignatories: Get; + type MaxSignatories: Get; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; } - /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::without_storage_info] pub struct Pallet(_); /// The set of open multisig operations. @@ -187,9 +164,13 @@ pub mod pallet { T::AccountId, Blake2_128Concat, [u8; 32], - Multisig, T::AccountId, T::MaxSignatories>, + Multisig, T::AccountId>, >; + #[pallet::storage] + pub type Calls = + StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf)>; + #[pallet::error] pub enum Error { /// Threshold must be 2 or greater. @@ -276,16 +257,16 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) + .saturating_add(dispatch_info.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(dispatch_info.weight), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, ) })] pub fn as_multi_threshold_1( origin: OriginFor, other_signatories: Vec, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let max_sigs = T::MaxSignatories::get() as usize; @@ -361,15 +342,16 @@ pub mod pallet { /// taken for its lifetime of `DepositBase + threshold * DepositFactor`. 
/// ------------------------------- /// - DB Weight: - /// - Reads: Multisig Storage, [Caller Account] - /// - Writes: Multisig Storage, [Caller Account] + /// - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`) + /// - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`) /// - Plus Call Weight /// # #[pallet::weight({ let s = other_signatories.len() as u32; - let z = call.using_encoded(|d| d.len()) as u32; + let z = call.encoded_len() as u32; T::WeightInfo::as_multi_create(s, z) + .max(T::WeightInfo::as_multi_create_store(s, z)) .max(T::WeightInfo::as_multi_approve(s, z)) .max(T::WeightInfo::as_multi_complete(s, z)) .saturating_add(*max_weight) @@ -379,7 +361,8 @@ pub mod pallet { threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call: Box<::RuntimeCall>, + call: OpaqueCall, + store_call: bool, max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -388,7 +371,7 @@ pub mod pallet { threshold, other_signatories, maybe_timepoint, - CallOrHash::Call(*call), + CallOrHash::Call(call, store_call), max_weight, ) } @@ -433,6 +416,7 @@ pub mod pallet { T::WeightInfo::approve_as_multi_create(s) .max(T::WeightInfo::approve_as_multi_approve(s)) + .max(T::WeightInfo::approve_as_multi_complete(s)) .saturating_add(*max_weight) })] pub fn approve_as_multi( @@ -477,8 +461,8 @@ pub mod pallet { /// - Storage: removes one item. 
/// ---------------------------------- /// - DB Weight: - /// - Read: Multisig Storage, [Caller Account], Refund Account - /// - Write: Multisig Storage, [Caller Account], Refund Account + /// - Read: Multisig Storage, [Caller Account], Refund Account, Calls + /// - Write: Multisig Storage, [Caller Account], Refund Account, Calls /// # #[pallet::weight(T::WeightInfo::cancel_as_multi(other_signatories.len() as u32))] pub fn cancel_as_multi( @@ -504,6 +488,7 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&m.depositor, m.deposit); debug_assert!(err_amount.is_zero()); >::remove(&id, &call_hash); + Self::clear_call(&call_hash); Self::deposit_event(Event::MultisigCancelled { cancelling: who, @@ -545,12 +530,13 @@ impl Pallet { let id = Self::multi_account_id(&signatories, threshold); // Threshold > 1; this means it's a multi-step operation. We extract the `call_hash`. - let (call_hash, call_len, maybe_call) = match call_or_hash { - CallOrHash::Call(call) => { - let (call_hash, call_len) = call.using_encoded(|d| (blake2_256(d), d.len())); - (call_hash, call_len, Some(call)) + let (call_hash, call_len, maybe_call, store) = match call_or_hash { + CallOrHash::Call(call, should_store) => { + let call_hash = blake2_256(call.encoded()); + let call_len = call.encoded_len(); + (call_hash, call_len, Some(call), should_store) }, - CallOrHash::Hash(h) => (h, 0, None), + CallOrHash::Hash(h) => (h, 0, None, false), }; // Branch on whether the operation has already started or not. @@ -569,16 +555,20 @@ impl Pallet { } // We only bother fetching/decoding call if we know that we're ready to execute. 
- if let Some(call) = maybe_call.filter(|_| approvals >= threshold) { + let maybe_approved_call = if approvals >= threshold { + Self::get_call(&call_hash, maybe_call.as_ref()) + } else { + None + }; + + if let Some((call, call_len)) = maybe_approved_call { // verify weight - ensure!( - call.get_dispatch_info().weight.all_lte(max_weight), - Error::::MaxWeightTooLow - ); + ensure!(call.get_dispatch_info().weight <= max_weight, Error::::MaxWeightTooLow); // Clean up storage before executing call to avoid an possibility of reentrancy // attack. >::remove(&id, call_hash); + Self::clear_call(&call_hash); T::Currency::unreserve(&m.depositor, m.deposit); let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); @@ -602,11 +592,22 @@ impl Pallet { // We cannot dispatch the call now; either it isn't available, or it is, but we // don't have threshold approvals even with our signature. + // Store the call if desired. + let stored = if let Some(data) = maybe_call.filter(|_| store) { + Self::store_call_and_reserve( + who.clone(), + &call_hash, + data, + BalanceOf::::zero(), + )?; + true + } else { + false + }; + if let Some(pos) = maybe_pos { // Record approval. - m.approvals - .try_insert(pos, who.clone()) - .map_err(|_| Error::::TooManySignatories)?; + m.approvals.insert(pos, who.clone()); >::insert(&id, call_hash, m); Self::deposit_event(Event::MultisigApproval { approving: who, @@ -617,11 +618,17 @@ impl Pallet { } else { // If we already approved and didn't store the Call, then this was useless and // we report an error. - Err(Error::::AlreadyApproved)? 
+ ensure!(stored, Error::::AlreadyApproved); } - let final_weight = - T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32); + let final_weight = if stored { + T::WeightInfo::as_multi_approve_store( + other_signatories_len as u32, + call_len as u32, + ) + } else { + T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32) + }; // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) } @@ -632,10 +639,14 @@ impl Pallet { // Just start the operation by recording it in storage. let deposit = T::DepositBase::get() + T::DepositFactor::get() * threshold.into(); - T::Currency::reserve(&who, deposit)?; - - let initial_approvals = - vec![who.clone()].try_into().map_err(|_| Error::::TooManySignatories)?; + // Store the call if desired. + let stored = if let Some(data) = maybe_call.filter(|_| store) { + Self::store_call_and_reserve(who.clone(), &call_hash, data, deposit)?; + true + } else { + T::Currency::reserve(&who, deposit)?; + false + }; >::insert( &id, @@ -644,18 +655,63 @@ impl Pallet { when: Self::timepoint(), deposit, depositor: who.clone(), - approvals: initial_approvals, + approvals: vec![who.clone()], }, ); Self::deposit_event(Event::NewMultisig { approving: who, multisig: id, call_hash }); - let final_weight = - T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32); + let final_weight = if stored { + T::WeightInfo::as_multi_create_store(other_signatories_len as u32, call_len as u32) + } else { + T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32) + }; // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) } } + /// Place a call's encoded data in storage, reserving funds as appropriate. + /// + /// We store `data` here because storing `call` would result in needing another `.encode`. + /// + /// Returns a `bool` indicating whether the data did end up being stored. 
+ fn store_call_and_reserve( + who: T::AccountId, + hash: &[u8; 32], + data: OpaqueCall, + other_deposit: BalanceOf, + ) -> DispatchResult { + ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); + let deposit = other_deposit + + T::DepositBase::get() + + T::DepositFactor::get() * + BalanceOf::::from(((data.encoded_len() + 31) / 32) as u32); + T::Currency::reserve(&who, deposit)?; + Calls::::insert(&hash, (data, who, deposit)); + Ok(()) + } + + /// Attempt to decode and return the call, provided by the user or from storage. + fn get_call( + hash: &[u8; 32], + maybe_known: Option<&OpaqueCall>, + ) -> Option<(::Call, usize)> { + maybe_known.map_or_else( + || { + Calls::::get(hash) + .and_then(|(data, ..)| Some((data.try_decode()?, data.encoded_len()))) + }, + |data| Some((data.try_decode()?, data.encoded_len())), + ) + } + + /// Attempt to remove a call from storage, returning any deposit on it to the owner. + fn clear_call(hash: &[u8; 32]) { + if let Some((_, who, deposit)) = Calls::::take(hash) { + T::Currency::unreserve(&who, deposit); + } + } + /// The current `Timepoint`. pub fn timepoint() -> Timepoint { Timepoint { diff --git a/frame/multisig/src/migrations.rs b/frame/multisig/src/migrations.rs deleted file mode 100644 index 5085297cde433..0000000000000 --- a/frame/multisig/src/migrations.rs +++ /dev/null @@ -1,86 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Migrations for Multisig Pallet - -use super::*; -use frame_support::{ - dispatch::GetStorageVersion, - traits::{OnRuntimeUpgrade, WrapperKeepOpaque}, - Identity, -}; - -#[cfg(feature = "try-runtime")] -use frame_support::ensure; - -pub mod v1 { - use super::*; - - type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; - - #[frame_support::storage_alias] - type Calls = StorageMap< - Pallet, - Identity, - [u8; 32], - (OpaqueCall, ::AccountId, BalanceOf), - >; - - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let onchain = Pallet::::on_chain_storage_version(); - - ensure!(onchain < 1, "this migration can be deleted"); - - log!(info, "Number of calls to refund and delete: {}", Calls::::iter().count()); - - Ok(Vec::new()) - } - - fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - - if onchain > 0 { - log!(info, "MigrateToV1 should be removed"); - return T::DbWeight::get().reads(1) - } - - Calls::::drain().for_each(|(_call_hash, (_data, caller, deposit))| { - T::Currency::unreserve(&caller, deposit); - }); - - current.put::>(); - - ::BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - let onchain = Pallet::::on_chain_storage_version(); - ensure!(onchain < 2, "this migration needs to be removed"); - ensure!(onchain == 1, "this migration needs to be run"); - ensure!( - Calls::::iter().count() == 0, - "there are some dangling calls that need to be destroyed and refunded" - ); - Ok(()) - } - } -} diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 206e566cf4cb6..d67d06e1bce05 100644 --- a/frame/multisig/src/tests.rs +++ 
b/frame/multisig/src/tests.rs @@ -24,7 +24,7 @@ use super::*; use crate as pallet_multisig; use frame_support::{ assert_noop, assert_ok, parameter_types, - traits::{ConstU32, ConstU64, Contains}, + traits::{ConstU16, ConstU32, ConstU64, Contains}, }; use sp_core::H256; use sp_runtime::{ @@ -34,6 +34,7 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; +type OpaqueCall = super::OpaqueCall; frame_support::construct_runtime!( pub enum Test where @@ -49,23 +50,23 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -83,7 +84,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -91,23 +92,23 @@ impl pallet_balances::Config for Test { } pub struct TestBaseCallFilter; -impl Contains for TestBaseCallFilter { - fn contains(c: &RuntimeCall) -> bool { +impl Contains for TestBaseCallFilter { + fn contains(c: &Call) -> bool { match *c { - RuntimeCall::Balances(_) => true, + Call::Balances(_) => 
true, // Needed for benchmarking - RuntimeCall::System(frame_system::Call::remark { .. }) => true, + Call::System(frame_system::Call::remark { .. }) => true, _ => false, } } } impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type Currency = Balances; type DepositBase = ConstU64<1>; type DepositFactor = ConstU64<1>; - type MaxSignatories = ConstU32<3>; + type MaxSignatories = ConstU16<3>; type WeightInfo = (); } @@ -129,37 +130,40 @@ fn now() -> Timepoint { Multisig::timepoint() } -fn call_transfer(dest: u64, value: u64) -> Box { - Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest, value })) +fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) } #[test] fn multisig_deposit_is_taken_and_returned() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(data.clone()), + false, + 0 )); assert_eq!(Balances::free_balance(1), 2); assert_eq!(Balances::reserved_balance(1), 3); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(now()), - call, + OpaqueCall::from_encoded(data), + false, call_weight )); assert_eq!(Balances::free_balance(1), 5); @@ -167,30 +171,126 @@ fn 
multisig_deposit_is_taken_and_returned() { }); } +#[test] +fn multisig_deposit_is_taken_and_returned_with_call_storage() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = call_transfer(6, 15); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + OpaqueCall::from_encoded(data), + true, + 0 + )); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::reserved_balance(1), 5); + + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash, + call_weight + )); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = call_transfer(6, 15); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(1), 4); + + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + OpaqueCall::from_encoded(data), + true, + 0 + )); + assert_eq!(Balances::free_balance(2), 3); + assert_eq!(Balances::reserved_balance(2), 2); + 
assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(1), 4); + + assert_ok!(Multisig::approve_as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + hash, + call_weight + )); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(2), 5); + assert_eq!(Balances::reserved_balance(2), 0); + }); +} + #[test] fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, vec![2, 3], None, - hash, - Weight::zero() + hash.clone(), + 0 )); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 3, vec![1, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 )); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash)); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -200,54 +300,49 @@ fn cancel_multisig_returns_deposit() { fn timepoint_checking_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = call_transfer(6, 15); - let hash = blake2_256(&call.encode()); + let call = call_transfer(6, 15).encode(); + let hash = 
blake2_256(&call); assert_noop!( Multisig::approve_as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 ), Error::::UnexpectedTimepoint, ); - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), - 2, - vec![2, 3], - None, - hash, - Weight::zero() - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_noop!( Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(call.clone()), + false, + 0 ), Error::::NoTimepoint, ); let later = Timepoint { index: 1, ..now() }; assert_noop!( Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(later), - call, - Weight::zero() + OpaqueCall::from_encoded(call), + false, + 0 ), Error::::WrongTimepoint, ); @@ -255,32 +350,62 @@ fn timepoint_checking_works() { } #[test] -fn multisig_2_of_3_works() { +fn multisig_2_of_3_works_with_call_storing() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let hash = blake2_256(&call.encode()); - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::as_multi( + Origin::signed(1), 2, vec![2, 3], None, + OpaqueCall::from_encoded(data), + true, + 0 + )); + assert_eq!(Balances::free_balance(6), 0); + + 
assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), hash, - Weight::zero() + call_weight )); + assert_eq!(Balances::free_balance(6), 15); + }); +} + +#[test] +fn multisig_2_of_3_works() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = call_transfer(6, 15); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(now()), - call, + OpaqueCall::from_encoded(data), + false, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -291,37 +416,39 @@ fn multisig_2_of_3_works() { fn multisig_3_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let hash = blake2_256(&call.encode()); + let data = call.encode(); + let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, vec![2, 3], None, - hash, - Weight::zero() + hash.clone(), + 0 )); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), + 
Origin::signed(2), 3, vec![1, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 )); assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 3, vec![1, 2], Some(now()), - call, + OpaqueCall::from_encoded(data), + false, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -334,26 +461,100 @@ fn cancel_multisig_works() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, vec![2, 3], None, - hash, - Weight::zero() + hash.clone(), + 0 )); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 3, vec![1, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 )); assert_noop!( - Multisig::cancel_as_multi(RuntimeOrigin::signed(2), 3, vec![1, 3], now(), hash), + Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), Error::::NotOwner, ); - assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash),); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); + }); +} + +#[test] +fn cancel_multisig_with_call_storage_works() { + new_test_ext().execute_with(|| { + let call = call_transfer(6, 15).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + OpaqueCall::from_encoded(call), + true, + 0 + )); + assert_eq!(Balances::free_balance(1), 4); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); + assert_noop!( + Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), + Error::::NotOwner, + ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); + assert_eq!(Balances::free_balance(1), 10); + }); +} + +#[test] +fn 
cancel_multisig_with_alt_call_storage_works() { + new_test_ext().execute_with(|| { + let call = call_transfer(6, 15).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_eq!(Balances::free_balance(1), 6); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + OpaqueCall::from_encoded(call), + true, + 0 + )); + assert_eq!(Balances::free_balance(2), 8); + assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); }); } @@ -361,28 +562,31 @@ fn cancel_multisig_works() { fn multisig_2_of_3_as_multi_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(data.clone()), + false, + 0 )); assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(now()), - call, + OpaqueCall::from_encoded(data), + false, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -393,45 +597,51 @@ fn multisig_2_of_3_as_multi_works() { fn multisig_2_of_3_as_multi_with_many_calls_works() { new_test_ext().execute_with(|| { let multi = 
Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call1 = call_transfer(6, 10); let call1_weight = call1.get_dispatch_info().weight; + let data1 = call1.encode(); let call2 = call_transfer(7, 5); let call2_weight = call2.get_dispatch_info().weight; + let data2 = call2.encode(); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - call1.clone(), - Weight::zero() + OpaqueCall::from_encoded(data1.clone()), + false, + 0 )); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], None, - call2.clone(), - Weight::zero() + OpaqueCall::from_encoded(data2.clone()), + false, + 0 )); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 2, vec![1, 2], Some(now()), - call1, + OpaqueCall::from_encoded(data1), + false, call1_weight )); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 2, vec![1, 2], Some(now()), - call2, + OpaqueCall::from_encoded(data2), + false, call2_weight )); @@ -444,45 +654,50 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { fn multisig_2_of_3_cannot_reissue_same_call() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + 
assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 10); let call_weight = call.get_dispatch_info().weight; - let hash = blake2_256(&call.encode()); + let data = call.encode(); + let hash = blake2_256(&data); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(data.clone()), + false, + 0 )); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(now()), - call.clone(), + OpaqueCall::from_encoded(data.clone()), + false, call_weight )); assert_eq!(Balances::free_balance(multi), 5); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(data.clone()), + false, + 0 )); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 2, vec![1, 2], Some(now()), - call.clone(), + OpaqueCall::from_encoded(data), + false, call_weight )); @@ -503,26 +718,28 @@ fn multisig_2_of_3_cannot_reissue_same_call() { #[test] fn minimum_threshold_check_works() { new_test_ext().execute_with(|| { - let call = call_transfer(6, 15); + let call = call_transfer(6, 15).encode(); assert_noop!( Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 0, vec![2], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(call.clone()), + false, + 0 ), Error::::MinimumThreshold, ); assert_noop!( Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 1, vec![2], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(call.clone()), + false, + 0 ), Error::::MinimumThreshold, ); @@ -532,15 +749,16 @@ fn minimum_threshold_check_works() { #[test] fn too_many_signatories_fails() { new_test_ext().execute_with(|| { - let call = call_transfer(6, 15); + let call = call_transfer(6, 15).encode(); assert_noop!( Multisig::as_multi( - RuntimeOrigin::signed(1), + 
Origin::signed(1), 2, vec![2, 3, 4], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(call), + false, + 0 ), Error::::TooManySignatories, ); @@ -553,40 +771,40 @@ fn duplicate_approvals_are_ignored() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - hash, - Weight::zero() + hash.clone(), + 0 )); assert_noop!( Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 ), Error::::AlreadyApproved, ); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 )); assert_noop!( Multisig::approve_as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 2, vec![1, 2], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 ), Error::::AlreadyApproved, ); @@ -597,39 +815,30 @@ fn duplicate_approvals_are_ignored() { fn multisig_1_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 1); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = call_transfer(6, 15); - let hash = blake2_256(&call.encode()); + let call = call_transfer(6, 15).encode(); + let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi( - RuntimeOrigin::signed(1), - 1, - vec![2, 3], - None, - hash, - Weight::zero() - ), + Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone(), 0), Error::::MinimumThreshold, ); assert_noop!( 
Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 1, vec![2, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(call), + false, + 0 ), Error::::MinimumThreshold, ); - assert_ok!(Multisig::as_multi_threshold_1( - RuntimeOrigin::signed(1), - vec![2, 3], - call_transfer(6, 15) - )); + let boxed_call = Box::new(call_transfer(6, 15)); + assert_ok!(Multisig::as_multi_threshold_1(Origin::signed(1), vec![2, 3], boxed_call)); assert_eq!(Balances::free_balance(6), 15); }); @@ -638,9 +847,9 @@ fn multisig_1_of_3_works() { #[test] fn multisig_filters() { new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::System(frame_system::Call::set_code { code: vec![] })); + let call = Box::new(Call::System(frame_system::Call::set_code { code: vec![] })); assert_noop!( - Multisig::as_multi_threshold_1(RuntimeOrigin::signed(1), vec![2], call.clone()), + Multisig::as_multi_threshold_1(Origin::signed(1), vec![2], call.clone()), DispatchError::from(frame_system::Error::::CallFiltered), ); }); @@ -650,29 +859,32 @@ fn multisig_filters() { fn weight_check_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 15); + let data = call.encode(); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, vec![2, 3], None, - call.clone(), - Weight::zero() + OpaqueCall::from_encoded(data.clone()), + false, + 0 )); assert_eq!(Balances::free_balance(6), 0); assert_noop!( Multisig::as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 2, vec![1, 3], 
Some(now()), - call, - Weight::zero() + OpaqueCall::from_encoded(data), + false, + 0 ), Error::::MaxWeightTooLow, ); @@ -686,45 +898,47 @@ fn multisig_handles_no_preimage_after_all_approve() { // the call will go through. new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let hash = blake2_256(&call.encode()); + let data = call.encode(); + let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), + Origin::signed(1), 3, vec![2, 3], None, - hash, - Weight::zero() + hash.clone(), + 0 )); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), + Origin::signed(2), 3, vec![1, 3], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 )); assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 3, vec![1, 2], Some(now()), - hash, - Weight::zero() + hash.clone(), + 0 )); assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(3), + Origin::signed(3), 3, vec![1, 2], Some(now()), - call, + OpaqueCall::from_encoded(data), + false, call_weight )); assert_eq!(Balances::free_balance(6), 15); diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index 1f435cb9f9087..7946b96546768 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_multisig //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/multisig/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/multisig/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,176 +46,213 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight; fn as_multi_create(s: u32, z: u32, ) -> Weight; + fn as_multi_create_store(s: u32, z: u32, ) -> Weight; fn as_multi_approve(s: u32, z: u32, ) -> Weight; + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight; fn as_multi_complete(s: u32, z: u32, ) -> Weight; fn approve_as_multi_create(s: u32, ) -> Weight; fn approve_as_multi_approve(s: u32, ) -> Weight; + fn approve_as_multi_complete(s: u32, ) -> Weight; fn cancel_as_multi(s: u32, ) -> Weight; } /// Weights for pallet_multisig using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_threshold_1(z: u32, ) -> Weight { - // Minimum execution time: 20_447 nanoseconds. - Weight::from_ref_time(20_896_236 as u64) - // Standard Error: 2 - .saturating_add(Weight::from_ref_time(568 as u64).saturating_mul(z as u64)) + fn as_multi_threshold_1(_z: u32, ) -> Weight { + (17_537_000 as Weight) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 54_987 nanoseconds. - Weight::from_ref_time(42_525_077 as u64) - // Standard Error: 562 - .saturating_add(Weight::from_ref_time(136_064 as u64).saturating_mul(s as u64)) - // Standard Error: 5 - .saturating_add(Weight::from_ref_time(1_508 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. 
+ (36_535_000 as Weight) + // Standard Error: 0 + .saturating_add((99_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn as_multi_create_store(s: u32, z: u32, ) -> Weight { + (39_918_000 as Weight) + // Standard Error: 1_000 + .saturating_add((95_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 42_573 nanoseconds. - Weight::from_ref_time(30_585_734 as u64) - // Standard Error: 637 - .saturating_add(Weight::from_ref_time(128_012 as u64).saturating_mul(s as u64)) - // Standard Error: 6 - .saturating_add(Weight::from_ref_time(1_507 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (25_524_000 as Weight) + // Standard Error: 0 + .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { + (39_923_000 as Weight) + // Standard Error: 1_000 + .saturating_add((91_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as 
Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 57_143 nanoseconds. - Weight::from_ref_time(43_921_674 as u64) - // Standard Error: 704 - .saturating_add(Weight::from_ref_time(153_474 as u64).saturating_mul(s as u64)) - // Standard Error: 6 - .saturating_add(Weight::from_ref_time(1_536 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (45_877_000 as Weight) + // Standard Error: 1_000 + .saturating_add((146_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { - // Minimum execution time: 39_088 nanoseconds. - Weight::from_ref_time(41_258_697 as u64) - // Standard Error: 1_038 - .saturating_add(Weight::from_ref_time(126_040 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (34_309_000 as Weight) + // Standard Error: 1_000 + .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) - /// The range of component `s` is `[2, 100]`. 
+ // Storage: Multisig Calls (r:1 w:0) fn approve_as_multi_approve(s: u32, ) -> Weight { - // Minimum execution time: 26_872 nanoseconds. - Weight::from_ref_time(28_625_218 as u64) - // Standard Error: 793 - .saturating_add(Weight::from_ref_time(128_542 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (22_848_000 as Weight) + // Standard Error: 0 + .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn approve_as_multi_complete(s: u32, ) -> Weight { + (63_239_000 as Weight) + // Standard Error: 1_000 + .saturating_add((161_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) - /// The range of component `s` is `[2, 100]`. + // Storage: Multisig Calls (r:1 w:1) fn cancel_as_multi(s: u32, ) -> Weight { - // Minimum execution time: 37_636 nanoseconds. - Weight::from_ref_time(39_614_705 as u64) - // Standard Error: 850 - .saturating_add(Weight::from_ref_time(136_222 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (51_254_000 as Weight) + // Standard Error: 1_000 + .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Minimum execution time: 20_447 nanoseconds. 
- Weight::from_ref_time(20_896_236 as u64) - // Standard Error: 2 - .saturating_add(Weight::from_ref_time(568 as u64).saturating_mul(z as u64)) + fn as_multi_threshold_1(_z: u32, ) -> Weight { + (17_537_000 as Weight) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 54_987 nanoseconds. - Weight::from_ref_time(42_525_077 as u64) - // Standard Error: 562 - .saturating_add(Weight::from_ref_time(136_064 as u64).saturating_mul(s as u64)) - // Standard Error: 5 - .saturating_add(Weight::from_ref_time(1_508 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. 
+ (36_535_000 as Weight) + // Standard Error: 0 + .saturating_add((99_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn as_multi_create_store(s: u32, z: u32, ) -> Weight { + (39_918_000 as Weight) + // Standard Error: 1_000 + .saturating_add((95_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 42_573 nanoseconds. 
- Weight::from_ref_time(30_585_734 as u64) - // Standard Error: 637 - .saturating_add(Weight::from_ref_time(128_012 as u64).saturating_mul(s as u64)) - // Standard Error: 6 - .saturating_add(Weight::from_ref_time(1_507 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (25_524_000 as Weight) + // Standard Error: 0 + .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { + (39_923_000 as Weight) + // Standard Error: 1_000 + .saturating_add((91_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 57_143 nanoseconds. 
- Weight::from_ref_time(43_921_674 as u64) - // Standard Error: 704 - .saturating_add(Weight::from_ref_time(153_474 as u64).saturating_mul(s as u64)) - // Standard Error: 6 - .saturating_add(Weight::from_ref_time(1_536 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (45_877_000 as Weight) + // Standard Error: 1_000 + .saturating_add((146_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { - // Minimum execution time: 39_088 nanoseconds. - Weight::from_ref_time(41_258_697 as u64) - // Standard Error: 1_038 - .saturating_add(Weight::from_ref_time(126_040 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (34_309_000 as Weight) + // Standard Error: 1_000 + .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) - /// The range of component `s` is `[2, 100]`. + // Storage: Multisig Calls (r:1 w:0) fn approve_as_multi_approve(s: u32, ) -> Weight { - // Minimum execution time: 26_872 nanoseconds. 
- Weight::from_ref_time(28_625_218 as u64) - // Standard Error: 793 - .saturating_add(Weight::from_ref_time(128_542 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (22_848_000 as Weight) + // Standard Error: 0 + .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn approve_as_multi_complete(s: u32, ) -> Weight { + (63_239_000 as Weight) + // Standard Error: 1_000 + .saturating_add((161_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Multisig Multisigs (r:1 w:1) - /// The range of component `s` is `[2, 100]`. + // Storage: Multisig Calls (r:1 w:1) fn cancel_as_multi(s: u32, ) -> Weight { - // Minimum execution time: 37_636 nanoseconds. 
- Weight::from_ref_time(39_614_705 as u64) - // Standard Error: 850 - .saturating_add(Weight::from_ref_time(136_222 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (51_254_000 as Weight) + // Standard Error: 1_000 + .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index b3238630d3174..5da06a24df3e5 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -47,7 +47,6 @@ type AccountIdOf = ::AccountId; type BalanceOf = <::Currency as Currency>>::Balance; type NegativeImbalanceOf = <::Currency as Currency>>::NegativeImbalance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] pub mod pallet { @@ -58,7 +57,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The currency trait. type Currency: ReservableCurrency; @@ -71,7 +70,7 @@ pub mod pallet { type Slashed: OnUnbalanced>; /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The minimum length a name may be. #[pallet::constant] @@ -140,7 +139,7 @@ pub mod pallet { let sender = ensure_signed(origin)?; let bounded_name: BoundedVec<_, _> = - name.try_into().map_err(|_| Error::::TooLong)?; + name.try_into().map_err(|()| Error::::TooLong)?; ensure!(bounded_name.len() >= T::MinLength::get() as usize, Error::::TooShort); let deposit = if let Some((_, deposit)) = >::get(&sender) { @@ -194,7 +193,10 @@ pub mod pallet { /// - One event. 
/// # #[pallet::weight(70_000_000)] - pub fn kill_name(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { + pub fn kill_name( + origin: OriginFor, + target: ::Source, + ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; // Figure out who we're meant to be clearing. @@ -223,13 +225,13 @@ pub mod pallet { #[pallet::weight(70_000_000)] pub fn force_name( origin: OriginFor, - target: AccountIdLookupOf, + target: ::Source, name: Vec, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let bounded_name: BoundedVec<_, _> = - name.try_into().map_err(|_| Error::::TooLong)?; + name.try_into().map_err(|()| Error::::TooLong)?; let target = T::Lookup::lookup(target)?; let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); >::insert(&target, (bounded_name, deposit)); @@ -273,23 +275,23 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -307,7 +309,7 @@ mod tests { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -318,7 +320,7 @@ mod tests { pub const One: u64 = 1; } impl Config for 
Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type ReservationFee = ConstU64<2>; type Slashed = (); @@ -338,9 +340,9 @@ mod tests { #[test] fn kill_name_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nicks::set_name(RuntimeOrigin::signed(2), b"Dave".to_vec())); + assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); assert_eq!(Balances::total_balance(&2), 10); - assert_ok!(Nicks::kill_name(RuntimeOrigin::signed(1), 2)); + assert_ok!(Nicks::kill_name(Origin::signed(1), 2)); assert_eq!(Balances::total_balance(&2), 8); assert_eq!(>::get(2), None); }); @@ -350,21 +352,17 @@ mod tests { fn force_name_should_work() { new_test_ext().execute_with(|| { assert_noop!( - Nicks::set_name(RuntimeOrigin::signed(2), b"Dr. David Brubeck, III".to_vec()), + Nicks::set_name(Origin::signed(2), b"Dr. David Brubeck, III".to_vec()), Error::::TooLong, ); - assert_ok!(Nicks::set_name(RuntimeOrigin::signed(2), b"Dave".to_vec())); + assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); assert_eq!(Balances::reserved_balance(2), 2); assert_noop!( - Nicks::force_name(RuntimeOrigin::signed(1), 2, b"Dr. David Brubeck, III".to_vec()), + Nicks::force_name(Origin::signed(1), 2, b"Dr. David Brubeck, III".to_vec()), Error::::TooLong, ); - assert_ok!(Nicks::force_name( - RuntimeOrigin::signed(1), - 2, - b"Dr. Brubeck, III".to_vec() - )); + assert_ok!(Nicks::force_name(Origin::signed(1), 2, b"Dr. Brubeck, III".to_vec())); assert_eq!(Balances::reserved_balance(2), 2); let (name, amount) = >::get(2).unwrap(); assert_eq!(name, b"Dr. 
Brubeck, III".to_vec()); @@ -375,17 +373,17 @@ mod tests { #[test] fn normal_operation_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nicks::set_name(RuntimeOrigin::signed(1), b"Gav".to_vec())); + assert_ok!(Nicks::set_name(Origin::signed(1), b"Gav".to_vec())); assert_eq!(Balances::reserved_balance(1), 2); assert_eq!(Balances::free_balance(1), 8); assert_eq!(>::get(1).unwrap().0, b"Gav".to_vec()); - assert_ok!(Nicks::set_name(RuntimeOrigin::signed(1), b"Gavin".to_vec())); + assert_ok!(Nicks::set_name(Origin::signed(1), b"Gavin".to_vec())); assert_eq!(Balances::reserved_balance(1), 2); assert_eq!(Balances::free_balance(1), 8); assert_eq!(>::get(1).unwrap().0, b"Gavin".to_vec()); - assert_ok!(Nicks::clear_name(RuntimeOrigin::signed(1))); + assert_ok!(Nicks::clear_name(Origin::signed(1))); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::free_balance(1), 10); }); @@ -394,27 +392,24 @@ mod tests { #[test] fn error_catching_should_work() { new_test_ext().execute_with(|| { - assert_noop!(Nicks::clear_name(RuntimeOrigin::signed(1)), Error::::Unnamed); + assert_noop!(Nicks::clear_name(Origin::signed(1)), Error::::Unnamed); assert_noop!( - Nicks::set_name(RuntimeOrigin::signed(3), b"Dave".to_vec()), + Nicks::set_name(Origin::signed(3), b"Dave".to_vec()), pallet_balances::Error::::InsufficientBalance ); assert_noop!( - Nicks::set_name(RuntimeOrigin::signed(1), b"Ga".to_vec()), + Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), Error::::TooShort ); assert_noop!( - Nicks::set_name(RuntimeOrigin::signed(1), b"Gavin James Wood, Esquire".to_vec()), + Nicks::set_name(Origin::signed(1), b"Gavin James Wood, Esquire".to_vec()), Error::::TooLong ); - assert_ok!(Nicks::set_name(RuntimeOrigin::signed(1), b"Dave".to_vec())); - assert_noop!(Nicks::kill_name(RuntimeOrigin::signed(2), 1), BadOrigin); - assert_noop!( - Nicks::force_name(RuntimeOrigin::signed(2), 1, b"Whatever".to_vec()), - BadOrigin - ); + assert_ok!(Nicks::set_name(Origin::signed(1), 
b"Dave".to_vec())); + assert_noop!(Nicks::kill_name(Origin::signed(2), 1), BadOrigin); + assert_noop!(Nicks::force_name(Origin::signed(2), 1, b"Whatever".to_vec()), BadOrigin); }); } } diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 638a96eb3321a..07f2e9de37dde 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -46,12 +46,9 @@ pub mod weights; pub use pallet::*; use sp_core::OpaquePeerId as PeerId; -use sp_runtime::traits::StaticLookup; use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; pub use weights::WeightInfo; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -67,7 +64,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The maximum number of well known nodes that are allowed to set #[pallet::constant] @@ -78,16 +75,16 @@ pub mod pallet { type MaxPeerIdLength: Get; /// The origin which can add a well known node. - type AddOrigin: EnsureOrigin; + type AddOrigin: EnsureOrigin; /// The origin which can remove a well known node. - type RemoveOrigin: EnsureOrigin; + type RemoveOrigin: EnsureOrigin; /// The origin which can swap the well known nodes. - type SwapOrigin: EnsureOrigin; + type SwapOrigin: EnsureOrigin; /// The origin which can reset the well known nodes. - type ResetOrigin: EnsureOrigin; + type ResetOrigin: EnsureOrigin; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; @@ -214,10 +211,9 @@ pub mod pallet { pub fn add_well_known_node( origin: OriginFor, node: PeerId, - owner: AccountIdLookupOf, + owner: T::AccountId, ) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; - let owner = T::Lookup::lookup(owner)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); let mut nodes = WellKnownNodes::::get(); @@ -359,10 +355,9 @@ pub mod pallet { pub fn transfer_node( origin: OriginFor, node: PeerId, - owner: AccountIdLookupOf, + owner: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; - let owner = T::Lookup::lookup(owner)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); let pre_owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs index fcf7ff0189332..d959d1b8610f5 100644 --- a/frame/node-authorization/src/mock.rs +++ b/frame/node-authorization/src/mock.rs @@ -52,16 +52,16 @@ impl frame_system::Config for Test { type DbWeight = (); type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -82,7 +82,7 @@ ord_parameter_types! 
{ } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type MaxWellKnownNodes = ConstU32<4>; type MaxPeerIdLength = ConstU32<2>; type AddOrigin = EnsureSignedBy; diff --git a/frame/node-authorization/src/tests.rs b/frame/node-authorization/src/tests.rs index d9db09db4f46f..ba78d14912133 100644 --- a/frame/node-authorization/src/tests.rs +++ b/frame/node-authorization/src/tests.rs @@ -26,27 +26,19 @@ use sp_runtime::traits::BadOrigin; fn add_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::add_well_known_node(RuntimeOrigin::signed(2), test_node(15), 15), + NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), BadOrigin ); assert_noop!( - NodeAuthorization::add_well_known_node( - RuntimeOrigin::signed(1), - PeerId(vec![1, 2, 3]), - 15 - ), + NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::add_well_known_node(RuntimeOrigin::signed(1), test_node(20), 20), + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), Error::::AlreadyJoined ); - assert_ok!(NodeAuthorization::add_well_known_node( - RuntimeOrigin::signed(1), - test_node(15), - 15 - )); + assert_ok!(NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15)); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) @@ -57,7 +49,7 @@ fn add_well_known_node_works() { assert_eq!(Owners::::get(test_node(15)), Some(15)); assert_noop!( - NodeAuthorization::add_well_known_node(RuntimeOrigin::signed(1), test_node(25), 25), + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), Error::::TooManyNodes ); }); @@ -67,18 +59,15 @@ fn add_well_known_node_works() { fn remove_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - 
NodeAuthorization::remove_well_known_node(RuntimeOrigin::signed(3), test_node(20)), + NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), BadOrigin ); assert_noop!( - NodeAuthorization::remove_well_known_node( - RuntimeOrigin::signed(2), - PeerId(vec![1, 2, 3]) - ), + NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::remove_well_known_node(RuntimeOrigin::signed(2), test_node(40)), + NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), Error::::NotExist ); @@ -88,10 +77,7 @@ fn remove_well_known_node_works() { ); assert!(AdditionalConnections::::contains_key(test_node(20))); - assert_ok!(NodeAuthorization::remove_well_known_node( - RuntimeOrigin::signed(2), - test_node(20) - )); + assert_ok!(NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20))); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(30)]) @@ -105,16 +91,12 @@ fn remove_well_known_node_works() { fn swap_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(4), - test_node(20), - test_node(5) - ), + NodeAuthorization::swap_well_known_node(Origin::signed(4), test_node(20), test_node(5)), BadOrigin ); assert_noop!( NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(3), + Origin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) ), @@ -122,7 +104,7 @@ fn swap_well_known_node_works() { ); assert_noop!( NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(3), + Origin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) ), @@ -130,7 +112,7 @@ fn swap_well_known_node_works() { ); assert_ok!(NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(3), + Origin::signed(3), test_node(20), test_node(20) )); @@ -140,16 +122,12 @@ fn swap_well_known_node_works() { ); assert_noop!( - 
NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(3), - test_node(15), - test_node(5) - ), + NodeAuthorization::swap_well_known_node(Origin::signed(3), test_node(15), test_node(5)), Error::::NotExist ); assert_noop!( NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(3), + Origin::signed(3), test_node(20), test_node(30) ), @@ -161,7 +139,7 @@ fn swap_well_known_node_works() { BTreeSet::from_iter(vec![test_node(15)]), ); assert_ok!(NodeAuthorization::swap_well_known_node( - RuntimeOrigin::signed(3), + Origin::signed(3), test_node(20), test_node(5) )); @@ -184,14 +162,14 @@ fn reset_well_known_nodes_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::reset_well_known_nodes( - RuntimeOrigin::signed(3), + Origin::signed(3), vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] ), BadOrigin ); assert_noop!( NodeAuthorization::reset_well_known_nodes( - RuntimeOrigin::signed(4), + Origin::signed(4), vec![ (test_node(15), 15), (test_node(5), 5), @@ -203,7 +181,7 @@ fn reset_well_known_nodes_works() { ); assert_ok!(NodeAuthorization::reset_well_known_nodes( - RuntimeOrigin::signed(4), + Origin::signed(4), vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] )); assert_eq!( @@ -220,15 +198,15 @@ fn reset_well_known_nodes_works() { fn claim_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::claim_node(RuntimeOrigin::signed(1), PeerId(vec![1, 2, 3])), + NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::claim_node(RuntimeOrigin::signed(1), test_node(20)), + NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), Error::::AlreadyClaimed ); - assert_ok!(NodeAuthorization::claim_node(RuntimeOrigin::signed(15), test_node(15))); + assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); assert_eq!(Owners::::get(test_node(15)), Some(15)); }); } @@ -237,21 +215,21 
@@ fn claim_node_works() { fn remove_claim_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), PeerId(vec![1, 2, 3])), + NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), test_node(15)), + NodeAuthorization::remove_claim(Origin::signed(15), test_node(15)), Error::::NotClaimed ); assert_noop!( - NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), test_node(20)), + NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), Error::::NotOwner ); assert_noop!( - NodeAuthorization::remove_claim(RuntimeOrigin::signed(20), test_node(20)), + NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), Error::::PermissionDenied ); @@ -260,7 +238,7 @@ fn remove_claim_works() { test_node(15), BTreeSet::from_iter(vec![test_node(20)]), ); - assert_ok!(NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), test_node(15))); + assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); assert!(!Owners::::contains_key(test_node(15))); assert!(!AdditionalConnections::::contains_key(test_node(15))); }); @@ -270,20 +248,20 @@ fn remove_claim_works() { fn transfer_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::transfer_node(RuntimeOrigin::signed(15), PeerId(vec![1, 2, 3]), 10), + NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::transfer_node(RuntimeOrigin::signed(15), test_node(15), 10), + NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), Error::::NotClaimed ); assert_noop!( - NodeAuthorization::transfer_node(RuntimeOrigin::signed(15), test_node(20), 10), + NodeAuthorization::transfer_node(Origin::signed(15), test_node(20), 10), Error::::NotOwner ); - 
assert_ok!(NodeAuthorization::transfer_node(RuntimeOrigin::signed(20), test_node(20), 15)); + assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); assert_eq!(Owners::::get(test_node(20)), Some(15)); }); } @@ -293,7 +271,7 @@ fn add_connections_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::add_connections( - RuntimeOrigin::signed(15), + Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] ), @@ -301,7 +279,7 @@ fn add_connections_works() { ); assert_noop!( NodeAuthorization::add_connections( - RuntimeOrigin::signed(15), + Origin::signed(15), test_node(15), vec![test_node(5)] ), @@ -310,7 +288,7 @@ fn add_connections_works() { assert_noop!( NodeAuthorization::add_connections( - RuntimeOrigin::signed(15), + Origin::signed(15), test_node(20), vec![test_node(5)] ), @@ -318,7 +296,7 @@ fn add_connections_works() { ); assert_ok!(NodeAuthorization::add_connections( - RuntimeOrigin::signed(20), + Origin::signed(20), test_node(20), vec![test_node(15), test_node(5), test_node(25), test_node(20)] )); @@ -334,7 +312,7 @@ fn remove_connections_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::remove_connections( - RuntimeOrigin::signed(15), + Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] ), @@ -342,7 +320,7 @@ fn remove_connections_works() { ); assert_noop!( NodeAuthorization::remove_connections( - RuntimeOrigin::signed(15), + Origin::signed(15), test_node(15), vec![test_node(5)] ), @@ -351,7 +329,7 @@ fn remove_connections_works() { assert_noop!( NodeAuthorization::remove_connections( - RuntimeOrigin::signed(15), + Origin::signed(15), test_node(20), vec![test_node(5)] ), @@ -363,7 +341,7 @@ fn remove_connections_works() { BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]), ); assert_ok!(NodeAuthorization::remove_connections( - RuntimeOrigin::signed(20), + Origin::signed(20), test_node(20), vec![test_node(15), test_node(5)] )); diff --git 
a/frame/node-authorization/src/weights.rs b/frame/node-authorization/src/weights.rs index 63728c21b65c4..cf182f94273ce 100644 --- a/frame/node-authorization/src/weights.rs +++ b/frame/node-authorization/src/weights.rs @@ -37,13 +37,13 @@ pub trait WeightInfo { } impl WeightInfo for () { - fn add_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) } - fn remove_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) } - fn swap_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) } - fn reset_well_known_nodes() -> Weight { Weight::from_ref_time(50_000_000) } - fn claim_node() -> Weight { Weight::from_ref_time(50_000_000) } - fn remove_claim() -> Weight { Weight::from_ref_time(50_000_000) } - fn transfer_node() -> Weight { Weight::from_ref_time(50_000_000) } - fn add_connections() -> Weight { Weight::from_ref_time(50_000_000) } - fn remove_connections() -> Weight { Weight::from_ref_time(50_000_000) } + fn add_well_known_node() -> Weight { 50_000_000 } + fn remove_well_known_node() -> Weight { 50_000_000 } + fn swap_well_known_node() -> Weight { 50_000_000 } + fn reset_well_known_nodes() -> Weight { 50_000_000 } + fn claim_node() -> Weight { 50_000_000 } + fn remove_claim() -> Weight { 50_000_000 } + fn transfer_node() -> Weight { 50_000_000 } + fn add_connections() -> Weight { 50_000_000 } + fn remove_connections() -> Weight { 50_000_000 } } diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index 2db0b234b726d..be5c38d85552c 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -26,17 +26,14 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../primitive sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } log = { version = "0.4.0", default-features = false } -# Optional: usef for testing and/or fuzzing -pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true } -sp-tracing = { version = 
"5.0.0", path = "../../primitives/tracing", optional = true } - [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } [features] +runtime-benchmarks = [] +try-runtime = [] default = ["std"] -fuzzing = ["pallet-balances", "sp-tracing"] std = [ "codec/std", "scale-info/std", @@ -49,9 +46,3 @@ std = [ "sp-core/std", "log/std", ] -runtime-benchmarks = [ - "sp-staking/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime" -] diff --git a/frame/nomination-pools/benchmarking/Cargo.toml b/frame/nomination-pools/benchmarking/Cargo.toml index 69ba6585481d5..2e045c95ec9b3 100644 --- a/frame/nomination-pools/benchmarking/Cargo.toml +++ b/frame/nomination-pools/benchmarking/Cargo.toml @@ -22,13 +22,12 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../../election-provider-support" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -pallet-bags-list = { version = "4.0.0-dev", default-features = false, path = "../../bags-list" } -pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } -pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../" } +pallet-bags-list = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../bags-list" } +pallet-staking = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../", features = ["runtime-benchmarks"] } # Substrate Primitives sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } 
-sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime-interface" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } @@ -41,7 +40,6 @@ sp-io = { version = "6.0.0", path = "../../../primitives/io" } [features] default = ["std"] - std = [ "frame-benchmarking/std", "frame-election-provider-support/std", @@ -51,20 +49,7 @@ std = [ "pallet-staking/std", "pallet-nomination-pools/std", "sp-runtime/std", - "sp-runtime-interface/std", - "sp-io/std", "sp-staking/std", - "sp-std/std", -] - -runtime-benchmarks = [ - "frame-election-provider-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "sp-staking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", - "pallet-nomination-pools/runtime-benchmarks", - "pallet-bags-list/runtime-benchmarks", + "sp-std/std", + "pallet-balances/std", ] diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 9b063539152b7..4c2c902846a85 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -17,7 +17,6 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. 
-#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(test)] @@ -26,13 +25,13 @@ mod mock; use frame_benchmarking::{account, frame_support::traits::Currency, vec, whitelist_account, Vec}; use frame_election_provider_support::SortedListProvider; use frame_support::{assert_ok, ensure, traits::Get}; -use frame_system::RawOrigin as RuntimeOrigin; +use frame_system::RawOrigin as Origin; use pallet_nomination_pools::{ BalanceOf, BondExtra, BondedPoolInner, BondedPools, ConfigOp, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; -use sp_runtime::traits::{Bounded, StaticLookup, Zero}; +use sp_runtime::traits::{Bounded, Zero}; use sp_staking::{EraIndex, StakingInterface}; // `frame_benchmarking::benchmarks!` macro needs this use pallet_nomination_pools::Call; @@ -42,16 +41,19 @@ type CurrencyOf = ::Currency; const USER_SEED: u32 = 0; const MAX_SPANS: u32 = 100; -type VoterBagsListInstance = pallet_bags_list::Instance1; pub trait Config: - pallet_nomination_pools::Config - + pallet_staking::Config - + pallet_bags_list::Config + pallet_nomination_pools::Config + pallet_staking::Config + pallet_bags_list::Config { } pub struct Pallet(Pools); +fn min_create_bond() -> BalanceOf { + MinCreateBond::::get() + .max(T::StakingInterface::minimum_bond()) + .max(CurrencyOf::::minimum_balance()) +} + fn create_funded_user_with_balance( string: &'static str, n: u32, @@ -71,14 +73,13 @@ fn create_pool_account( let ed = CurrencyOf::::minimum_balance(); let pool_creator: T::AccountId = create_funded_user_with_balance::("pool_creator", n, ed + balance * 2u32.into()); - let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); Pools::::create( - RuntimeOrigin::Signed(pool_creator.clone()).into(), + Origin::Signed(pool_creator.clone()).into(), balance, - pool_creator_lookup.clone(), - pool_creator_lookup.clone(), - 
pool_creator_lookup, + pool_creator.clone(), + pool_creator.clone(), + pool_creator.clone(), ) .unwrap(); @@ -133,15 +134,15 @@ impl ListScenario { // Create accounts with the origin weight let (pool_creator1, pool_origin1) = create_pool_account::(USER_SEED + 1, origin_weight); - T::Staking::nominate( - &pool_origin1, + T::StakingInterface::nominate( + pool_origin1.clone(), // NOTE: these don't really need to be validators. vec![account("random_validator", 0, USER_SEED)], )?; let (_, pool_origin2) = create_pool_account::(USER_SEED + 2, origin_weight); - T::Staking::nominate( - &pool_origin2, + T::StakingInterface::nominate( + pool_origin2.clone(), vec![account("random_validator", 0, USER_SEED)].clone(), )?; @@ -156,7 +157,10 @@ impl ListScenario { // Create an account with the worst case destination weight let (_, pool_dest1) = create_pool_account::(USER_SEED + 3, dest_weight); - T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; + T::StakingInterface::nominate( + pool_dest1.clone(), + vec![account("random_validator", 0, USER_SEED)], + )?; let weight_of = pallet_staking::Pallet::::weight_of_fn(); assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); @@ -182,24 +186,25 @@ impl ListScenario { self.origin1_member = Some(joiner.clone()); CurrencyOf::::make_free_balance_be(&joiner, amount * 2u32.into()); - let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); + let original_bonded = T::StakingInterface::active_stake(&self.origin1).unwrap(); // Unbond `amount` from the underlying pool account so when the member joins // we will maintain `current_bonded`. - T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); + T::StakingInterface::unbond(self.origin1.clone(), amount) + .expect("the pool was created in `Self::new`."); // Account pool points for the unbonded balance. 
BondedPools::::mutate(&1, |maybe_pool| { maybe_pool.as_mut().map(|pool| pool.points -= amount) }); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), amount, 1).unwrap(); + Pools::::join(Origin::Signed(joiner.clone()).into(), amount, 1).unwrap(); - // check that the vote weight is still the same as the original bonded + // Sanity check that the vote weight is still the same as the original bonded let weight_of = pallet_staking::Pallet::::weight_of_fn(); assert_eq!(vote_to_balance::(weight_of(&self.origin1)).unwrap(), original_bonded); - // check the member was added correctly + // Sanity check the member was added correctly let member = PoolMembers::::get(&joiner).unwrap(); assert_eq!(member.points, amount); assert_eq!(member.pool_id, 1); @@ -210,66 +215,66 @@ impl ListScenario { frame_benchmarking::benchmarks! { join { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let origin_weight = min_create_bond::() * 2u32.into(); // setup the worst case list scenario. 
let scenario = ListScenario::::new(origin_weight, true)?; assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), + T::StakingInterface::active_stake(&scenario.origin1).unwrap(), origin_weight ); - let max_additional = scenario.dest_weight - origin_weight; + let max_additional = scenario.dest_weight.clone() - origin_weight; let joiner_free = CurrencyOf::::minimum_balance() + max_additional; let joiner: T::AccountId = create_funded_user_with_balance::("joiner", 0, joiner_free); whitelist_account!(joiner); - }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) + }: _(Origin::Signed(joiner.clone()), max_additional, 1) verify { assert_eq!(CurrencyOf::::free_balance(&joiner), joiner_free - max_additional); assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), + T::StakingInterface::active_stake(&scenario.origin1).unwrap(), scenario.dest_weight ); } bond_extra_transfer { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let origin_weight = min_create_bond::() * 2u32.into(); let scenario = ListScenario::::new(origin_weight, true)?; - let extra = scenario.dest_weight - origin_weight; + let extra = scenario.dest_weight.clone() - origin_weight; // creator of the src pool will bond-extra, bumping itself to dest bag. 
- }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) + }: bond_extra(Origin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) verify { assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= + T::StakingInterface::active_stake(&scenario.origin1).unwrap() >= scenario.dest_weight ); } bond_extra_reward { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let origin_weight = min_create_bond::() * 2u32.into(); let scenario = ListScenario::::new(origin_weight, true)?; - let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); + let extra = (scenario.dest_weight.clone() - origin_weight).max(CurrencyOf::::minimum_balance()); // transfer exactly `extra` to the depositor of the src pool (1), let reward_account1 = Pools::::create_reward_account(1); assert!(extra >= CurrencyOf::::minimum_balance()); CurrencyOf::::deposit_creating(&reward_account1, extra); - }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::Rewards) + }: bond_extra(Origin::Signed(scenario.creator1.clone()), BondExtra::Rewards) verify { assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= + T::StakingInterface::active_stake(&scenario.origin1).unwrap() >= scenario.dest_weight ); } claim_payout { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let origin_weight = min_create_bond::() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); let (depositor, pool_account) = create_pool_account::(0, origin_weight); let reward_account = Pools::::create_reward_account(1); @@ -284,7 +289,7 @@ frame_benchmarking::benchmarks! { ); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone())) + }:_(Origin::Signed(depositor.clone())) verify { assert_eq!( CurrencyOf::::free_balance(&depositor), @@ -299,27 +304,26 @@ frame_benchmarking::benchmarks! { unbond { // The weight the nominator will start at. 
The value used here is expected to be // significantly higher than the first position in a list (e.g. the first bag threshold). - let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); + let origin_weight = min_create_bond::() * 200u32.into(); let scenario = ListScenario::::new(origin_weight, false)?; - let amount = origin_weight - scenario.dest_weight; + let amount = origin_weight - scenario.dest_weight.clone(); let scenario = scenario.add_joiner(amount); let member_id = scenario.origin1_member.unwrap().clone(); - let member_id_lookup = T::Lookup::unlookup(member_id.clone()); let all_points = PoolMembers::::get(&member_id).unwrap().points; whitelist_account!(member_id); - }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) + }: _(Origin::Signed(member_id.clone()), member_id.clone(), all_points) verify { - let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); + let bonded_after = T::StakingInterface::active_stake(&scenario.origin1).unwrap(); // We at least went down to the destination bag - assert!(bonded_after <= scenario.dest_weight); + assert!(bonded_after <= scenario.dest_weight.clone()); let member = PoolMembers::::get( &member_id ) .unwrap(); assert_eq!( member.unbonding_eras.keys().cloned().collect::>(), - vec![0 + T::Staking::bonding_duration()] + vec![0 + T::StakingInterface::bonding_duration()] ); assert_eq!( member.unbonding_eras.values().cloned().collect::>(), @@ -330,28 +334,28 @@ frame_benchmarking::benchmarks! { pool_withdraw_unbonded { let s in 0 .. 
MAX_SPANS; - let min_create_bond = Pools::::depositor_min_bond(); + let min_create_bond = min_create_bond::(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + Pools::::join(Origin::Signed(joiner.clone()).into(), min_join_bond, 1) .unwrap(); // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakingInterface::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); // Unbond the new member - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + Pools::::fully_unbond(Origin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakingInterface::active_stake(&pool_account).unwrap(), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -361,7 +365,7 @@ frame_benchmarking::benchmarks! { // Add `s` count of slashing spans to storage. pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(pool_account); - }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) + }: _(Origin::Signed(pool_account.clone()), 1, s) verify { // The joiners funds didn't change assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); @@ -372,30 +376,29 @@ frame_benchmarking::benchmarks! { withdraw_unbonded_update { let s in 0 .. 
MAX_SPANS; - let min_create_bond = Pools::::depositor_min_bond(); + let min_create_bond = min_create_bond::(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + Pools::::join(Origin::Signed(joiner.clone()).into(), min_join_bond, 1) .unwrap(); // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakingInterface::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); // Unbond the new member pallet_staking::CurrentEra::::put(0); - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + Pools::::fully_unbond(Origin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakingInterface::active_stake(&pool_account).unwrap(), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -405,7 +408,7 @@ frame_benchmarking::benchmarks! { pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(joiner); - }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) + }: withdraw_unbonded(Origin::Signed(joiner.clone()), joiner.clone(), s) verify { assert_eq!( CurrencyOf::::free_balance(&joiner), @@ -418,9 +421,8 @@ frame_benchmarking::benchmarks! { withdraw_unbonded_kill { let s in 0 .. 
MAX_SPANS; - let min_create_bond = Pools::::depositor_min_bond(); + let min_create_bond = min_create_bond::(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); - let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // We set the pool to the destroying state so the depositor can leave BondedPools::::try_mutate(&1, |maybe_bonded_pool| { @@ -438,11 +440,11 @@ frame_benchmarking::benchmarks! { // up when unbonding. let reward_account = Pools::::create_reward_account(1); assert!(frame_system::Account::::contains_key(&reward_account)); - Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); + Pools::::fully_unbond(Origin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakingInterface::active_stake(&pool_account).unwrap(), Zero::zero() ); assert_eq!( @@ -463,7 +465,7 @@ frame_benchmarking::benchmarks! { assert!(frame_system::Account::::contains_key(&reward_account)); whitelist_account!(depositor); - }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) + }: withdraw_unbonded(Origin::Signed(depositor.clone()), depositor.clone(), s) verify { // Pool removal worked assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); @@ -483,24 +485,23 @@ frame_benchmarking::benchmarks! 
{ } create { - let min_create_bond = Pools::::depositor_min_bond(); + let min_create_bond = min_create_bond::(); let depositor: T::AccountId = account("depositor", USER_SEED, 0); - let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond CurrencyOf::::make_free_balance_be(&depositor, min_create_bond * 2u32.into()); - // Make sure no Pools exist at a pre-condition for our verify checks + // Make sure no pools exist as a pre-condition for our verify checks assert_eq!(RewardPools::::count(), 0); assert_eq!(BondedPools::::count(), 0); whitelist_account!(depositor); }: _( - RuntimeOrigin::Signed(depositor.clone()), + Origin::Signed(depositor.clone()), min_create_bond, - depositor_lookup.clone(), - depositor_lookup.clone(), - depositor_lookup + depositor.clone(), + depositor.clone(), + depositor.clone() ) verify { assert_eq!(RewardPools::::count(), 1); @@ -521,8 +522,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) + T::StakingInterface::active_stake(&Pools::::create_bonded_account(1)), + Some(min_create_bond) ); } @@ -530,7 +531,7 @@ frame_benchmarking::benchmarks! { let n in 1 .. T::MaxNominations::get(); // Create a pool - let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); + let min_create_bond = min_create_bond::() * 2u32.into(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); // Create some accounts to nominate. For the sake of benchmarking they don't need to be @@ -540,7 +541,7 @@ frame_benchmarking::benchmarks! { .collect(); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) + }:_(Origin::Signed(depositor.clone()), 1, validators) verify { assert_eq!(RewardPools::::count(), 1); assert_eq!(BondedPools::::count(), 1); @@ -560,14 +561,14 @@ frame_benchmarking::benchmarks! 
{ } ); assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) + T::StakingInterface::active_stake(&Pools::::create_bonded_account(1)), + Some(min_create_bond) ); } set_state { // Create a pool - let min_create_bond = Pools::::depositor_min_bond(); + let min_create_bond = min_create_bond::(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); BondedPools::::mutate(&1, |maybe_pool| { // Force the pool into an invalid state @@ -576,7 +577,7 @@ frame_benchmarking::benchmarks! { let caller = account("caller", 0, USER_SEED); whitelist_account!(caller); - }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) + }:_(Origin::Signed(caller), 1, PoolState::Destroying) verify { assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); } @@ -585,20 +586,20 @@ frame_benchmarking::benchmarks! { let n in 1 .. ::MaxMetadataLen::get(); // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into()); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond::() * 2u32.into()); // Create metadata of the max possible size let metadata: Vec = (0..n).map(|_| 42).collect(); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) + }:_(Origin::Signed(depositor), 1, metadata.clone()) verify { assert_eq!(Metadata::::get(&1), metadata); } set_configs { }:_( - RuntimeOrigin::Root, + Origin::Root, ConfigOp::Set(BalanceOf::::max_value()), ConfigOp::Set(BalanceOf::::max_value()), ConfigOp::Set(u32::MAX), @@ -614,10 +615,10 @@ frame_benchmarking::benchmarks! 
{ update_roles { let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; - let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into()); + let (root, _) = create_pool_account::(0, min_create_bond::() * 2u32.into()); let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); }:_( - RuntimeOrigin::Signed(root.clone()), + Origin::Signed(root.clone()), first_id, ConfigOp::Set(random.clone()), ConfigOp::Set(random.clone()), @@ -636,20 +637,20 @@ frame_benchmarking::benchmarks! { chill { // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into()); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond::() * 2u32.into()); // Nominate with the pool. let validators: Vec<_> = (0..T::MaxNominations::get()) .map(|i| account("stash", USER_SEED, i)) .collect(); - assert_ok!(T::Staking::nominate(&pool_account, validators)); - assert!(T::Staking::nominations(Pools::::create_bonded_account(1)).is_some()); + assert_ok!(Pools::::nominate(Origin::Signed(depositor.clone()).into(), 1, validators)); + assert!(T::StakingInterface::nominations(Pools::::create_bonded_account(1)).is_some()); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1) + }:_(Origin::Signed(depositor.clone()), 1) verify { - assert!(T::Staking::nominations(Pools::::create_bonded_account(1)).is_none()); + assert!(T::StakingInterface::nominations(Pools::::create_bonded_account(1)).is_none()); } impl_benchmark_test_suite!( diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 6959aa9783ee5..d239d4f072b80 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; use sp_runtime::{ @@ -33,16 +32,16 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -70,7 +69,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -97,7 +96,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -110,12 +109,10 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking)>; type GenesisElectionProvider = Self::ElectionProvider; - type VoterList = VoterList; - type TargetList = pallet_staking::UseValidatorsMap; + type VoterList = pallet_bags_list::Pallet; type MaxUnlockingChunks = ConstU32<32>; - type 
HistoryDepth = ConstU32<84>; type OnStakerSlash = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); @@ -125,8 +122,8 @@ parameter_types! { pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; } -impl pallet_bags_list::Config for Runtime { - type RuntimeEvent = RuntimeEvent; +impl pallet_bags_list::Config for Runtime { + type Event = Event; type WeightInfo = (); type BagThresholds = BagThresholds; type ScoreProvider = Staking; @@ -154,13 +151,14 @@ parameter_types! { } impl pallet_nomination_pools::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakingInterface = Staking; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; @@ -182,7 +180,7 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, - VoterList: pallet_bags_list::::{Pallet, Call, Storage, Event}, + BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, Pools: pallet_nomination_pools::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/nomination-pools/fuzzer/Cargo.toml b/frame/nomination-pools/fuzzer/Cargo.toml deleted file mode 100644 index 7dde8733e3f60..0000000000000 --- a/frame/nomination-pools/fuzzer/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "pallet-nomination-pools-fuzzer" -version = "2.0.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Fuzzer for 
fixed point arithmetic primitives." -documentation = "https://docs.rs/sp-arithmetic-fuzzer" -publish = false - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -honggfuzz = "0.5.54" - -pallet-nomination-pools = { path = "..", features = ["fuzzing"] } - -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } - -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-tracing = { path = "../../../primitives/tracing" } - -rand = { version = "0.8.5", features = ["small_rng"] } -log = "0.4.17" - -[[bin]] -name = "call" -path = "src/call.rs" diff --git a/frame/nomination-pools/fuzzer/src/call.rs b/frame/nomination-pools/fuzzer/src/call.rs deleted file mode 100644 index b07903609e8ab..0000000000000 --- a/frame/nomination-pools/fuzzer/src/call.rs +++ /dev/null @@ -1,353 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Running -//! Running this fuzzer can be done with `cargo hfuzz run call`. `honggfuzz` CLI -//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. -//! -//! # Debugging a panic -//! Once a panic is found, it can be debugged with -//! `cargo hfuzz run-debug per_thing_rational hfuzz_workspace/call/*.fuzz`. 
- -use frame_support::{ - assert_ok, - traits::{Currency, GetCallName, UnfilteredDispatchable}, -}; -use honggfuzz::fuzz; -use pallet_nomination_pools::{ - log, - mock::*, - pallet as pools, - pallet::{BondedPools, Call as PoolsCall, Event as PoolsEvents, PoolMembers}, - BondExtra, BondedPool, LastPoolId, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, - MinCreateBond, MinJoinBond, PoolId, -}; -use rand::{seq::SliceRandom, Rng}; -use sp_runtime::{assert_eq_error_rate, Perquintill}; - -const ERA: BlockNumber = 1000; -const MAX_ED_MULTIPLE: Balance = 10_000; -const MIN_ED_MULTIPLE: Balance = 10; - -// not quite elegant, just to make it available in random_signed_origin. -const REWARD_AGENT_ACCOUNT: AccountId = 42; - -/// Grab random accounts, either known ones, or new ones. -fn random_signed_origin(rng: &mut R) -> (RuntimeOrigin, AccountId) { - let count = PoolMembers::::count(); - if rng.gen::() && count > 0 { - // take an existing account. - let skip = rng.gen_range(0..count as usize); - - // this is tricky: the account might be our reward agent, which we never want to be - // randomly chosen here. Try another one, or, if it is only our agent, return a random - // one nonetheless. 
- let candidate = PoolMembers::::iter_keys().skip(skip).take(1).next().unwrap(); - let acc = - if candidate == REWARD_AGENT_ACCOUNT { rng.gen::() } else { candidate }; - - (RuntimeOrigin::signed(acc), acc) - } else { - // create a new account - let acc = rng.gen::(); - (RuntimeOrigin::signed(acc), acc) - } -} - -fn random_ed_multiple(rng: &mut R) -> Balance { - let multiple = rng.gen_range(MIN_ED_MULTIPLE..MAX_ED_MULTIPLE); - ExistentialDeposit::get() * multiple -} - -fn fund_account(rng: &mut R, account: &AccountId) { - let target_amount = random_ed_multiple(rng); - if let Some(top_up) = target_amount.checked_sub(Balances::free_balance(account)) { - let _ = Balances::deposit_creating(account, top_up); - } - assert!(Balances::free_balance(account) >= target_amount); -} - -fn random_existing_pool(mut rng: &mut R) -> Option { - BondedPools::::iter_keys().collect::>().choose(&mut rng).map(|x| *x) -} - -fn random_call(mut rng: &mut R) -> (pools::Call, RuntimeOrigin) { - let op = rng.gen::(); - let mut op_count = as GetCallName>::get_call_names().len(); - // Exclude set_state, set_metadata, set_configs, update_roles and chill. 
- op_count -= 5; - - match op % op_count { - 0 => { - // join - let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); - let (origin, who) = random_signed_origin(&mut rng); - fund_account(&mut rng, &who); - let amount = random_ed_multiple(&mut rng); - (PoolsCall::::join { amount, pool_id }, origin) - }, - 1 => { - // bond_extra - let (origin, who) = random_signed_origin(&mut rng); - let extra = if rng.gen::() { - BondExtra::Rewards - } else { - fund_account(&mut rng, &who); - let amount = random_ed_multiple(&mut rng); - BondExtra::FreeBalance(amount) - }; - (PoolsCall::::bond_extra { extra }, origin) - }, - 2 => { - // claim_payout - let (origin, _) = random_signed_origin(&mut rng); - (PoolsCall::::claim_payout {}, origin) - }, - 3 => { - // unbond - let (origin, who) = random_signed_origin(&mut rng); - let amount = random_ed_multiple(&mut rng); - (PoolsCall::::unbond { member_account: who, unbonding_points: amount }, origin) - }, - 4 => { - // pool_withdraw_unbonded - let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); - let (origin, _) = random_signed_origin(&mut rng); - (PoolsCall::::pool_withdraw_unbonded { pool_id, num_slashing_spans: 0 }, origin) - }, - 5 => { - // withdraw_unbonded - let (origin, who) = random_signed_origin(&mut rng); - ( - PoolsCall::::withdraw_unbonded { member_account: who, num_slashing_spans: 0 }, - origin, - ) - }, - 6 => { - // create - let (origin, who) = random_signed_origin(&mut rng); - let amount = random_ed_multiple(&mut rng); - fund_account(&mut rng, &who); - let root = who; - let state_toggler = who; - let nominator = who; - (PoolsCall::::create { amount, root, state_toggler, nominator }, origin) - }, - 7 => { - // nominate - let (origin, _) = random_signed_origin(&mut rng); - let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); - let validators = Default::default(); - (PoolsCall::::nominate { pool_id, validators }, origin) - }, - _ => unreachable!(), - } -} - -#[derive(Default)] -struct 
RewardAgent { - who: AccountId, - pool_id: Option, - expected_reward: Balance, -} - -// TODO: inject some slashes into the game. -impl RewardAgent { - fn new(who: AccountId) -> Self { - Self { who, ..Default::default() } - } - - fn join(&mut self) { - if self.pool_id.is_some() { - return - } - let pool_id = LastPoolId::::get(); - let amount = 10 * ExistentialDeposit::get(); - let origin = RuntimeOrigin::signed(self.who); - let _ = Balances::deposit_creating(&self.who, 10 * amount); - self.pool_id = Some(pool_id); - log::info!(target: "reward-agent", "🤖 reward agent joining in {} with {}", pool_id, amount); - assert_ok!(PoolsCall::join:: { amount, pool_id }.dispatch_bypass_filter(origin)); - } - - fn claim_payout(&mut self) { - // 10 era later, we claim our payout. We expect our income to be roughly what we - // calculated. - if !PoolMembers::::contains_key(&self.who) { - log!(warn, "reward agent is not in the pool yet, cannot claim"); - return - } - let pre = Balances::free_balance(&42); - let origin = RuntimeOrigin::signed(42); - assert_ok!(PoolsCall::::claim_payout {}.dispatch_bypass_filter(origin)); - let post = Balances::free_balance(&42); - - let income = post - pre; - log::info!( - target: "reward-agent", "🤖 CLAIM: actual: {}, expected: {}", - income, - self.expected_reward, - ); - assert_eq_error_rate!(income, self.expected_reward, 10); - self.expected_reward = 0; - } -} - -fn main() { - let mut reward_agent = RewardAgent::new(REWARD_AGENT_ACCOUNT); - sp_tracing::try_init_simple(); - let mut ext = sp_io::TestExternalities::new_empty(); - let mut events_histogram = Vec::<(PoolsEvents, u32)>::default(); - let mut iteration = 0 as BlockNumber; - let mut ok = 0; - let mut err = 0; - - let dot: Balance = (10 as Balance).pow(10); - ExistentialDeposit::set(dot); - BondingDuration::set(8); - - ext.execute_with(|| { - MaxPoolMembers::::set(Some(10_000)); - MaxPoolMembersPerPool::::set(Some(1000)); - MaxPools::::set(Some(1_000)); - - MinCreateBond::::set(10 * 
ExistentialDeposit::get()); - MinJoinBond::::set(5 * ExistentialDeposit::get()); - System::set_block_number(1); - }); - - loop { - fuzz!(|seed: [u8; 32]| { - use ::rand::{rngs::SmallRng, SeedableRng}; - let mut rng = SmallRng::from_seed(seed); - - ext.execute_with(|| { - let (call, origin) = random_call(&mut rng); - let outcome = call.clone().dispatch_bypass_filter(origin.clone()); - iteration += 1; - match outcome { - Ok(_) => ok += 1, - Err(_) => err += 1, - }; - - log!( - trace, - "iteration {}, call {:?}, origin {:?}, outcome: {:?}, so far {} ok {} err", - iteration, - call, - origin, - outcome, - ok, - err, - ); - - // possibly join the reward_agent - if iteration > ERA / 2 && BondedPools::::count() > 0 { - reward_agent.join(); - } - // and possibly roughly every 4 era, trigger payout for the agent. Doing this more - // frequent is also harmless. - if rng.gen_range(0..(4 * ERA)) == 0 { - reward_agent.claim_payout(); - } - - // execute sanity checks at a fixed interval, possibly on every block. - if iteration % - (std::env::var("SANITY_CHECK_INTERVAL") - .ok() - .and_then(|x| x.parse::().ok())) - .unwrap_or(1) == 0 - { - log!(info, "running sanity checks at {}", iteration); - Pools::do_try_state(u8::MAX).unwrap(); - } - - // collect and reset events. - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let pallet_nomination_pools::mock::RuntimeEvent::Pools(inner) = e { - Some(inner) - } else { - None - } - }) - .for_each(|e| { - if let Some((_, c)) = events_histogram - .iter_mut() - .find(|(x, _)| std::mem::discriminant(x) == std::mem::discriminant(&e)) - { - *c += 1; - } else { - events_histogram.push((e, 1)) - } - }); - System::reset_events(); - - // trigger an era change, and check the status of the reward agent. 
- if iteration % ERA == 0 { - CurrentEra::mutate(|c| *c += 1); - BondedPools::::iter().for_each(|(id, _)| { - let amount = random_ed_multiple(&mut rng); - let _ = - Balances::deposit_creating(&Pools::create_reward_account(id), amount); - // if we just paid out the reward agent, let's calculate how much we expect - // our reward agent to have earned. - if reward_agent.pool_id.map_or(false, |mid| mid == id) { - let all_points = BondedPool::::get(id).map(|p| p.points).unwrap(); - let member_points = - PoolMembers::::get(reward_agent.who).map(|m| m.points).unwrap(); - let agent_share = Perquintill::from_rational(member_points, all_points); - log::info!( - target: "reward-agent", - "🤖 REWARD = amount = {:?}, ratio: {:?}, share {:?}", - amount, - agent_share, - agent_share * amount, - ); - reward_agent.expected_reward += agent_share * amount; - } - }); - - log!( - info, - "iteration {}, {} pools, {} members, {} ok {} err, events = {:?}", - iteration, - BondedPools::::count(), - PoolMembers::::count(), - ok, - err, - events_histogram - .iter() - .map(|(x, c)| ( - format!("{:?}", x) - .split(" ") - .map(|x| x.to_string()) - .collect::>() - .first() - .cloned() - .unwrap(), - c, - )) - .collect::>(), - ); - } - }) - }) - } -} diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 9ca9539b3dca8..09f1746b8e5ba 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -24,12 +24,11 @@ //! //! * [Key terms](#key-terms) //! * [Usage](#usage) -//! * [Implementor's Guide](#implementors-guide) //! * [Design](#design) //! -//! ## Key Terms +//! ## Key terms //! -//! * pool id: A unique identifier of each pool. Set to u32. +//! * pool id: A unique identifier of each pool. Set to u12 //! * bonded pool: Tracks the distribution of actively staked funds. See [`BondedPool`] and //! [`BondedPoolInner`]. //! * reward pool: Tracks rewards earned by actively staked funds. See [`RewardPool`] and @@ -48,7 +47,7 @@ //! 
exactly the same rules and conditions as a normal staker. Its bond increases or decreases as //! members join, it can `nominate` or `chill`, and might not even earn staking rewards if it is //! not nominating proper validators. -//! * reward account: A similar key-less account, that is set as the `Payee` account for the bonded +//! * reward account: A similar key-less account, that is set as the `Payee` account fo the bonded //! account for all staking rewards. //! //! ## Usage @@ -89,11 +88,7 @@ //! in the aforementioned range of eras will be affected by the slash. A member is slashed pro-rata //! based on its stake relative to the total slash amount. //! -//! Slashing does not change any single member's balance. Instead, the slash will only reduce the -//! balance associated with a particular pool. But, we never change the total *points* of a pool -//! because of slashing. Therefore, when a slash happens, the ratio of points to balance changes in -//! a pool. In other words, the value of one point, which is initially 1-to-1 against a unit of -//! balance, is now less than one balance because of the slash. +//! For design docs see the [slashing](#slashing) section. //! //! ### Administration //! @@ -101,10 +96,6 @@ //! user must call [`Call::nominate`] to start nominating. [`Call::nominate`] can be called at //! anytime to update validator selection. //! -//! Similar to [`Call::nominate`], [`Call::chill`] will chill to pool in the staking system, and -//! [`Call::pool_withdraw_unbonded`] will withdraw any unbonding chunks of the pool bonded account. -//! The latter call is permissionless and can be called by anyone at any time. -//! //! To help facilitate pool administration the pool has one of three states (see [`PoolState`]): //! //! * Open: Anyone can join the pool and no members can be permissionlessly removed. @@ -130,52 +121,10 @@ //! //! 1. First, all members need to fully unbond and withdraw. If the pool state is set to //! 
`Destroying`, this can happen permissionlessly. -//! 2. The depositor itself fully unbonds and withdraws. -//! -//! > Note that at this point, based on the requirements of the staking system, the pool's bonded -//! > account's stake might not be able to ge below a certain threshold as a nominator. At this -//! > point, the pool should `chill` itself to allow the depositor to leave. See [`Call::chill`]. -//! -//! ## Implementor's Guide -//! -//! Some notes and common mistakes that wallets/apps wishing to implement this pallet should be -//! aware of: -//! -//! -//! ### Pool Members -//! -//! * In general, whenever a pool member changes their total point, the chain will automatically -//! claim all their pending rewards for them. This is not optional, and MUST happen for the reward -//! calculation to remain correct (see the documentation of `bond` as an example). So, make sure -//! you are warning your users about it. They might be surprised if they see that they bonded an -//! extra 100 DOTs, and now suddenly their 5.23 DOTs in pending reward is gone. It is not gone, it -//! has been paid out to you! -//! * Joining a pool implies transferring funds to the pool account. So it might be (based on which -//! wallet that you are using) that you no longer see the funds that are moved to the pool in your -//! “free balance” section. Make sure the user is aware of this, and not surprised by seeing this. -//! Also, the transfer that happens here is configured to to never accidentally destroy the sender -//! account. So to join a Pool, your sender account must remain alive with 1 DOT left in it. This -//! means, with 1 DOT as existential deposit, and 1 DOT as minimum to join a pool, you need at -//! least 2 DOT to join a pool. Consequently, if you are suggesting members to join a pool with -//! “Maximum possible value”, you must subtract 1 DOT to remain in the sender account to not -//! accidentally kill it. -//! * Points and balance are not the same! 
Any pool member, at any point in time, can have points in -//! either the bonded pool or any of the unbonding pools. The crucial fact is that in any of these -//! pools, the ratio of point to balance is different and might not be 1. Each pool starts with a -//! ratio of 1, but as time goes on, for reasons such as slashing, the ratio gets broken. Over -//! time, 100 points in a bonded pool can be worth 90 DOTs. Make sure you are either representing -//! points as points (not as DOTs), or even better, always display both: “You have x points in -//! pool y which is worth z DOTs”. See here and here for examples of how to calculate point to -//! balance ratio of each pool (it is almost trivial ;)) -//! -//! ### Pool Management -//! -//! * The pool will be seen from the perspective of the rest of the system as a single nominator. -//! Ergo, This nominator must always respect the `staking.minNominatorBond` limit. Similar to a -//! normal nominator, who has to first `chill` before fully unbonding, the pool must also do the -//! same. The pool’s bonded account will be fully unbonded only when the depositor wants to leave -//! and dismantle the pool. All that said, the message is: the depositor can only leave the chain -//! when they chill the pool first. +//! 2. The depositor itself fully unbonds and withdraws. Note that at this point, based on the +//! requirements of the staking system, the pool's bonded account's stake might not be able to ge +//! below a certain threshold as a nominator. At this point, the pool should `chill` itself to +//! allow the depositor to leave. //! //! ## Design //! 
@@ -328,15 +277,13 @@ use frame_support::{ Currency, Defensive, DefensiveOption, DefensiveResult, DefensiveSaturating, ExistenceRequirement, Get, }, - DefaultNoBound, + transactional, CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_core::U256; use sp_runtime::{ - traits::{ - AccountIdConversion, CheckedAdd, CheckedSub, Convert, Saturating, StaticLookup, Zero, - }, - FixedPointNumber, + traits::{AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, Zero}, + FixedPointNumber, FixedPointOperand, }; use sp_staking::{EraIndex, OnStakerSlash, StakingInterface}; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec}; @@ -349,14 +296,14 @@ pub const LOG_TARGET: &'static str = "runtime::nomination-pools"; macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { log::$level!( - target: $crate::LOG_TARGET, + target: crate::LOG_TARGET, concat!("[{:?}] 🏊‍♂️ ", $patter), >::block_number() $(, $values)* ) }; } -#[cfg(any(test, feature = "fuzzing"))] -pub mod mock; +#[cfg(test)] +mod mock; #[cfg(test)] mod tests; @@ -372,7 +319,7 @@ pub type BalanceOf = /// Type used for unique identifier of each pool. pub type PoolId = u32; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type UnbondingPoolsWithEra = BoundedBTreeMap, TotalUnbondingPools>; pub const POINTS_TO_BALANCE_INIT_RATIO: u32 = 1; @@ -446,12 +393,16 @@ impl PoolMember { // // rc * 10^20 / 10^18 = rc * 100 // - // the implementation of `multiply_by_rational_with_rounding` shows that it will only fail - // if the final division is not enough to fit in u128. In other words, if `rc * 100` is more - // than u128::max. Given that RC is interpreted as reward per unit of point, and unit of - // point is equal to balance (normally), and rewards are usually a proportion of the points - // in the pool, the likelihood of rc reaching near u128::MAX is near impossible. 
- + // meaning that as long as reward_counter's value is less than 1/100th of its max capacity + // (u128::MAX_VALUE), `checked_mul_int` won't saturate. + // + // given the nature of reward counter being 'pending_rewards / pool_total_point', the only + // (unrealistic) way that super high values can be achieved is for a pool to suddenly + // receive massive rewards with a very very small amount of stake. In all normal pools, as + // the points increase, so does the rewards. Moreover, as long as rewards are not + // accumulated for astronomically large durations, + // `current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)` + // won't be extremely big. (current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)) .checked_mul_int(self.active_points()) .ok_or(Error::::OverflowRisk) @@ -540,7 +491,7 @@ impl PoolMember { true } else { removed_points - .try_insert(*e, *p) + .try_insert(*e, p.clone()) .expect("source map is bounded, this is a subset, will be bounded; qed"); false } @@ -638,7 +589,7 @@ impl BondedPool { } /// Get [`Self`] from storage. Returns `None` if no entry for `pool_account` exists. - pub fn get(id: PoolId) -> Option { + fn get(id: PoolId) -> Option { BondedPools::::try_get(id).ok().map(|inner| Self { id, inner }) } @@ -667,7 +618,7 @@ impl BondedPool { /// This is often used for bonding and issuing new funds into the pool. fn balance_to_point(&self, new_funds: BalanceOf) -> BalanceOf { let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + T::StakingInterface::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); Pallet::::balance_to_point(bonded_balance, self.points, new_funds) } @@ -676,7 +627,7 @@ impl BondedPool { /// This is often used for unbonding. 
fn points_to_balance(&self, points: BalanceOf) -> BalanceOf { let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + T::StakingInterface::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); Pallet::::point_to_balance(bonded_balance, self.points, points) } @@ -727,7 +678,7 @@ impl BondedPool { fn transferrable_balance(&self) -> BalanceOf { let account = self.bonded_account(); T::Currency::free_balance(&account) - .saturating_sub(T::Staking::active_stake(&account).unwrap_or_default()) + .saturating_sub(T::StakingInterface::active_stake(&account).unwrap_or_default()) } fn is_root(&self, who: &T::AccountId) -> bool { @@ -778,11 +729,11 @@ impl BondedPool { /// Whether or not the pool is ok to be in `PoolSate::Open`. If this returns an `Err`, then the /// pool is unrecoverable and should be in the destroying state. - fn ok_to_be_open(&self) -> Result<(), DispatchError> { + fn ok_to_be_open(&self, new_funds: BalanceOf) -> Result<(), DispatchError> { ensure!(!self.is_destroying(), Error::::CanNotChangeState); let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + T::StakingInterface::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); ensure!(!bonded_balance.is_zero(), Error::::OverflowRisk); let points_to_balance_ratio_floor = self @@ -799,6 +750,12 @@ impl BondedPool { points_to_balance_ratio_floor < max_points_to_balance.into(), Error::::OverflowRisk ); + // while restricting the balance to `max_points_to_balance` of max total issuance, + let next_bonded_balance = bonded_balance.saturating_add(new_funds); + ensure!( + next_bonded_balance < BalanceOf::::max_value().div(max_points_to_balance.into()), + Error::::OverflowRisk + ); // then we can be decently confident the bonding pool points will not overflow // `BalanceOf`. Note that these are just heuristics. @@ -807,9 +764,9 @@ impl BondedPool { } /// Check that the pool can accept a member with `new_funds`. 
- fn ok_to_join(&self) -> Result<(), DispatchError> { + fn ok_to_join(&self, new_funds: BalanceOf) -> Result<(), DispatchError> { ensure!(self.state == PoolState::Open, Error::::NotOpen); - self.ok_to_be_open()?; + self.ok_to_be_open(new_funds)?; Ok(()) } @@ -900,8 +857,8 @@ impl BondedPool { /// Bond exactly `amount` from `who`'s funds into this pool. /// - /// If the bond type is `Create`, `Staking::bond` is called, and `who` - /// is allowed to be killed. Otherwise, `Staking::bond_extra` is called and `who` + /// If the bond type is `Create`, `StakingInterface::bond` is called, and `who` + /// is allowed to be killed. Otherwise, `StakingInterface::bond_extra` is called and `who` /// cannot be killed. /// /// Returns `Ok(points_issues)`, `Err` otherwise. @@ -927,11 +884,16 @@ impl BondedPool { let points_issued = self.issue(amount); match ty { - BondType::Create => T::Staking::bond(&bonded_account, amount, &self.reward_account())?, + BondType::Create => T::StakingInterface::bond( + bonded_account.clone(), + bonded_account, + amount, + self.reward_account(), + )?, // The pool should always be created in such a way its in a state to bond extra, but if // the active balance is slashed below the minimum bonded or the account cannot be // found, we exit early. - BondType::Later => T::Staking::bond_extra(&bonded_account, amount)?, + BondType::Later => T::StakingInterface::bond_extra(bonded_account, amount)?, } Ok(points_issued) @@ -1111,7 +1073,7 @@ pub struct SubPools { /// older then `current_era - TotalUnbondingPools`. no_era: UnbondPool, /// Map of era in which a pool becomes unbonded in => unbond pools. - with_era: BoundedBTreeMap, TotalUnbondingPools>, + with_era: UnbondingPoolsWithEra, } impl SubPools { @@ -1143,7 +1105,7 @@ impl SubPools { } /// The sum of all unbonding balance, regardless of whether they are actually unlocked or not. 
- #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] + #[cfg(any(test, debug_assertions))] fn sum_unbonding_balance(&self) -> BalanceOf { self.no_era.balance.saturating_add( self.with_era @@ -1160,9 +1122,9 @@ pub struct TotalUnbondingPools(PhantomData); impl Get for TotalUnbondingPools { fn get() -> u32 { // NOTE: this may be dangerous in the scenario bonding_duration gets decreased because - // we would no longer be able to decode `BoundedBTreeMap::, - // TotalUnbondingPools>`, which uses `TotalUnbondingPools` as the bound - T::Staking::bonding_duration() + T::PostUnbondingPoolsWindow::get() + // we would no longer be able to decode `UnbondingPoolsWithEra`, which uses + // `TotalUnbondingPools` as the bound + T::StakingInterface::bonding_duration() + T::PostUnbondingPoolsWindow::get() } } @@ -1171,9 +1133,10 @@ pub mod pallet { use super::*; use frame_support::traits::StorageVersion; use frame_system::{ensure_signed, pallet_prelude::*}; + use sp_runtime::traits::CheckedAdd; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] #[pallet::generate_store(pub(crate) trait Store)] @@ -1183,13 +1146,26 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: weights::WeightInfo; /// The nominating balance. - type Currency: Currency; + type Currency: Currency; + + /// Sadly needed to bound it to `FixedPointOperand`. + // The only alternative is to sprinkle a `where BalanceOf: FixedPointOperand` in roughly + // a million places, so we prefer doing this. 
+ type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned + + codec::FullCodec + + MaybeSerializeDeserialize + + sp_std::fmt::Debug + + Default + + FixedPointOperand + + CheckedAdd + + TypeInfo + + MaxEncodedLen; /// The type that is used for reward counter. /// @@ -1231,7 +1207,10 @@ pub mod pallet { type U256ToBalance: Convert>; /// The interface for nominating. - type Staking: StakingInterface, AccountId = Self::AccountId>; + type StakingInterface: StakingInterface< + Balance = BalanceOf, + AccountId = Self::AccountId, + >; /// The amount of eras a `SubPools::with_era` pool can exist before it gets merged into the /// `SubPools::no_era` pool. In other words, this is the amount of eras a member will be @@ -1467,13 +1446,9 @@ pub mod pallet { Defensive(DefensiveError), /// Partial unbonding now allowed permissionlessly. PartialUnbondNotAllowedPermissionlessly, - /// Pool id currently in use. - PoolIdInUse, - /// Pool id provided is not correct/usable. - InvalidPoolId, } - #[derive(Encode, Decode, PartialEq, TypeInfo, frame_support::PalletError, RuntimeDebug)] + #[derive(Encode, Decode, PartialEq, TypeInfo, frame_support::PalletError)] pub enum DefensiveError { /// There isn't enough space in the unbond pool. NotEnoughSpaceInUnbondPool, @@ -1507,6 +1482,7 @@ pub mod pallet { /// `existential deposit + amount` in their account. 
/// * Only a pool with [`PoolState::Open`] can be joined #[pallet::weight(T::WeightInfo::join())] + #[transactional] pub fn join( origin: OriginFor, #[pallet::compact] amount: BalanceOf, @@ -1519,7 +1495,7 @@ pub mod pallet { ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; - bonded_pool.ok_to_join()?; + bonded_pool.ok_to_join(amount)?; let mut reward_pool = RewardPools::::get(pool_id) .defensive_ok_or::>(DefensiveError::RewardPoolNotFound.into())?; @@ -1567,6 +1543,7 @@ pub mod pallet { T::WeightInfo::bond_extra_transfer() .max(T::WeightInfo::bond_extra_reward()) )] + #[transactional] pub fn bond_extra(origin: OriginFor, extra: BondExtra>) -> DispatchResult { let who = ensure_signed(origin)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?; @@ -1574,6 +1551,10 @@ pub mod pallet { // payout related stuff: we must claim the payouts, and updated recorded payout data // before updating the bonded pool points, similar to that of `join` transaction. reward_pool.update_records(bonded_pool.id, bonded_pool.points)?; + // TODO: optimize this to not touch the free balance of `who ` at all in benchmarks. + // Currently, bonding rewards is like a batch. In the same PR, also make this function + // take a boolean argument that make it either 100% pure (no storage update), or make it + // also emit event and do the transfer. 
#11671 let claimed = Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; @@ -1584,9 +1565,8 @@ pub mod pallet { (bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed), }; - bonded_pool.ok_to_be_open()?; - member.points = - member.points.checked_add(&points_issued).ok_or(Error::::OverflowRisk)?; + bonded_pool.ok_to_be_open(bonded)?; + member.points = member.points.saturating_add(points_issued); Self::deposit_event(Event::::Bonded { member: who.clone(), @@ -1606,6 +1586,7 @@ pub mod pallet { /// The member will earn rewards pro rata based on the members stake vs the sum of the /// members in the pools stake. Rewards do not "expire". #[pallet::weight(T::WeightInfo::claim_payout())] + #[transactional] pub fn claim_payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?; @@ -1645,13 +1626,13 @@ pub mod pallet { /// there are too many unlocking chunks, the result of this call will likely be the /// `NoMoreChunks` error from the staking system. 
#[pallet::weight(T::WeightInfo::unbond())] + #[transactional] pub fn unbond( origin: OriginFor, - member_account: AccountIdLookupOf, + member_account: T::AccountId, #[pallet::compact] unbonding_points: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; - let member_account = T::Lookup::lookup(member_account)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&member_account)?; @@ -1663,12 +1644,12 @@ pub mod pallet { let _ = reward_pool.update_records(bonded_pool.id, bonded_pool.points)?; let _ = Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; - let current_era = T::Staking::current_era(); - let unbond_era = T::Staking::bonding_duration().saturating_add(current_era); + let current_era = T::StakingInterface::current_era(); + let unbond_era = T::StakingInterface::bonding_duration().saturating_add(current_era); // Unbond in the actual underlying nominator. let unbonding_balance = bonded_pool.dissolve(unbonding_points); - T::Staking::unbond(&bonded_pool.bonded_account(), unbonding_balance)?; + T::StakingInterface::unbond(bonded_pool.bonded_account(), unbonding_balance)?; // Note that we lazily create the unbonding pools here if they don't already exist let mut sub_pools = SubPoolsStorage::::get(member.pool_id) @@ -1720,6 +1701,7 @@ pub mod pallet { /// would probably see an error like `NoMoreChunks` emitted from the staking system when /// they attempt to unbond. #[pallet::weight(T::WeightInfo::pool_withdraw_unbonded(*num_slashing_spans))] + #[transactional] pub fn pool_withdraw_unbonded( origin: OriginFor, pool_id: PoolId, @@ -1730,7 +1712,7 @@ pub mod pallet { // For now we only allow a pool to withdraw unbonded if its not destroying. If the pool // is destroying then `withdraw_unbonded` can be used. 
ensure!(pool.state != PoolState::Destroying, Error::::NotDestroying); - T::Staking::withdraw_unbonded(pool.bonded_account(), num_slashing_spans)?; + T::StakingInterface::withdraw_unbonded(pool.bonded_account(), num_slashing_spans)?; Ok(()) } @@ -1756,21 +1738,21 @@ pub mod pallet { #[pallet::weight( T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans) )] + #[transactional] pub fn withdraw_unbonded( origin: OriginFor, - member_account: AccountIdLookupOf, + member_account: T::AccountId, num_slashing_spans: u32, ) -> DispatchResultWithPostInfo { let caller = ensure_signed(origin)?; - let member_account = T::Lookup::lookup(member_account)?; let mut member = PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; - let current_era = T::Staking::current_era(); + let current_era = T::StakingInterface::current_era(); let bonded_pool = BondedPool::::get(member.pool_id) .defensive_ok_or::>(DefensiveError::PoolNotFound.into())?; - let mut sub_pools = - SubPoolsStorage::::get(member.pool_id).ok_or(Error::::SubPoolsNotFound)?; + let mut sub_pools = SubPoolsStorage::::get(member.pool_id) + .defensive_ok_or::>(DefensiveError::SubPoolsNotFound.into())?; bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?; @@ -1778,10 +1760,12 @@ pub mod pallet { let withdrawn_points = member.withdraw_unlocked(current_era); ensure!(!withdrawn_points.is_empty(), Error::::CannotWithdrawAny); - // Before calculating the `balance_to_unbond`, we call withdraw unbonded to ensure the + // Before calculate the `balance_to_unbond`, with call withdraw unbonded to ensure the // `transferrable_balance` is correct. - let stash_killed = - T::Staking::withdraw_unbonded(bonded_pool.bonded_account(), num_slashing_spans)?; + let stash_killed = T::StakingInterface::withdraw_unbonded( + bonded_pool.bonded_account(), + num_slashing_spans, + )?; // defensive-only: the depositor puts enough funds into the stash so that it will only // be destroyed when they are leaving. 
@@ -1807,13 +1791,13 @@ pub mod pallet { accumulator.saturating_add(sub_pools.no_era.dissolve(*unlocked_points)) } }) - // A call to this transaction may cause the pool's stash to get dusted. If this - // happens before the last member has withdrawn, then all subsequent withdraws will - // be 0. However the unbond pools do no get updated to reflect this. In the - // aforementioned scenario, this check ensures we don't try to withdraw funds that - // don't exist. This check is also defensive in cases where the unbond pool does not - // update its balance (e.g. a bug in the slashing hook.) We gracefully proceed in - // order to ensure members can leave the pool and it can be destroyed. + // A call to this function may cause the pool's stash to get dusted. If this happens + // before the last member has withdrawn, then all subsequent withdraws will be 0. + // However the unbond pools do no get updated to reflect this. In the aforementioned + // scenario, this check ensures we don't try to withdraw funds that don't exist. + // This check is also defensive in cases where the unbond pool does not update its + // balance (e.g. a bug in the slashing hook.) We gracefully proceed in order to + // ensure members can leave the pool and it can be destroyed. .min(bonded_pool.transferrable_balance()); T::Currency::transfer( @@ -1875,44 +1859,81 @@ pub mod pallet { /// In addition to `amount`, the caller will transfer the existential deposit; so the caller /// needs at have at least `amount + existential_deposit` transferrable. 
#[pallet::weight(T::WeightInfo::create())] + #[transactional] pub fn create( origin: OriginFor, #[pallet::compact] amount: BalanceOf, - root: AccountIdLookupOf, - nominator: AccountIdLookupOf, - state_toggler: AccountIdLookupOf, + root: T::AccountId, + nominator: T::AccountId, + state_toggler: T::AccountId, ) -> DispatchResult { - let depositor = ensure_signed(origin)?; + let who = ensure_signed(origin)?; - let pool_id = LastPoolId::::try_mutate::<_, Error, _>(|id| { - *id = id.checked_add(1).ok_or(Error::::OverflowRisk)?; - Ok(*id) - })?; + ensure!(amount >= Pallet::::depositor_min_bond(), Error::::MinimumBondNotMet); + ensure!( + MaxPools::::get() + .map_or(true, |max_pools| BondedPools::::count() < max_pools), + Error::::MaxPools + ); + ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); - Self::do_create(depositor, amount, root, nominator, state_toggler, pool_id) - } + let pool_id = LastPoolId::::mutate(|id| { + *id += 1; + *id + }); + let mut bonded_pool = BondedPool::::new( + pool_id, + PoolRoles { + root: Some(root), + nominator: Some(nominator), + state_toggler: Some(state_toggler), + depositor: who.clone(), + }, + ); - /// Create a new delegation pool with a previously used pool id - /// - /// # Arguments - /// - /// same as `create` with the inclusion of - /// * `pool_id` - `A valid PoolId. 
- #[pallet::weight(T::WeightInfo::create())] - pub fn create_with_pool_id( - origin: OriginFor, - #[pallet::compact] amount: BalanceOf, - root: AccountIdLookupOf, - nominator: AccountIdLookupOf, - state_toggler: AccountIdLookupOf, - pool_id: PoolId, - ) -> DispatchResult { - let depositor = ensure_signed(origin)?; + bonded_pool.try_inc_members()?; + let points = bonded_pool.try_bond_funds(&who, amount, BondType::Create)?; + + T::Currency::transfer( + &who, + &bonded_pool.reward_account(), + T::Currency::minimum_balance(), + ExistenceRequirement::AllowDeath, + )?; + + PoolMembers::::insert( + who.clone(), + PoolMember:: { + pool_id, + points, + last_recorded_reward_counter: Zero::zero(), + unbonding_eras: Default::default(), + }, + ); + RewardPools::::insert( + pool_id, + RewardPool:: { + last_recorded_reward_counter: Zero::zero(), + last_recorded_total_payouts: Zero::zero(), + total_rewards_claimed: Zero::zero(), + }, + ); + ReversePoolIdLookup::::insert(bonded_pool.bonded_account(), pool_id); + + Self::deposit_event(Event::::Created { + depositor: who.clone(), + pool_id: pool_id.clone(), + }); - ensure!(!BondedPools::::contains_key(pool_id), Error::::PoolIdInUse); - ensure!(pool_id < LastPoolId::::get(), Error::::InvalidPoolId); + Self::deposit_event(Event::::Bonded { + member: who, + pool_id, + bonded: amount, + joined: true, + }); + bonded_pool.put(); - Self::do_create(depositor, amount, root, nominator, state_toggler, pool_id) + Ok(()) } /// Nominate on behalf of the pool. @@ -1923,6 +1944,7 @@ pub mod pallet { /// This directly forward the call to the staking pallet, on behalf of the pool bonded /// account. 
#[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))] + #[transactional] pub fn nominate( origin: OriginFor, pool_id: PoolId, @@ -1931,7 +1953,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); - T::Staking::nominate(&bonded_pool.bonded_account(), validators) + T::StakingInterface::nominate(bonded_pool.bonded_account(), validators) } /// Set a new state for the pool. @@ -1945,6 +1967,7 @@ pub mod pallet { /// 2. if the pool conditions to be open are NOT met (as described by `ok_to_be_open`), and /// then the state of the pool can be permissionlessly changed to `Destroying`. #[pallet::weight(T::WeightInfo::set_state())] + #[transactional] pub fn set_state( origin: OriginFor, pool_id: PoolId, @@ -1956,7 +1979,9 @@ pub mod pallet { if bonded_pool.can_toggle_state(&who) { bonded_pool.set_state(state); - } else if bonded_pool.ok_to_be_open().is_err() && state == PoolState::Destroying { + } else if bonded_pool.ok_to_be_open(Zero::zero()).is_err() && + state == PoolState::Destroying + { // If the pool has bad properties, then anyone can set it as destroying bonded_pool.set_state(PoolState::Destroying); } else { @@ -1973,6 +1998,7 @@ pub mod pallet { /// The dispatch origin of this call must be signed by the state toggler, or the root role /// of the pool. #[pallet::weight(T::WeightInfo::set_metadata(metadata.len() as u32))] + #[transactional] pub fn set_metadata( origin: OriginFor, pool_id: PoolId, @@ -2004,6 +2030,7 @@ pub mod pallet { /// * `max_members` - Set [`MaxPoolMembers`]. /// * `max_members_per_pool` - Set [`MaxPoolMembersPerPool`]. #[pallet::weight(T::WeightInfo::set_configs())] + #[transactional] pub fn set_configs( origin: OriginFor, min_join_bond: ConfigOp>, @@ -2040,6 +2067,7 @@ pub mod pallet { /// It emits an event, notifying UIs of the role change. 
This event is quite relevant to /// most pool members and they should be informed of changes to pool roles. #[pallet::weight(T::WeightInfo::update_roles())] + #[transactional] pub fn update_roles( origin: OriginFor, pool_id: PoolId, @@ -2092,28 +2120,24 @@ pub mod pallet { /// This directly forward the call to the staking pallet, on behalf of the pool bonded /// account. #[pallet::weight(T::WeightInfo::chill())] + #[transactional] pub fn chill(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); - T::Staking::chill(&bonded_pool.bonded_account()) + T::StakingInterface::chill(bonded_pool.bonded_account()) } } #[pallet::hooks] impl Hooks> for Pallet { - #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumberFor) -> Result<(), &'static str> { - Self::do_try_state(u8::MAX) - } - fn integrity_test() { assert!( T::MaxPointsToBalance::get() > 0, "Minimum points to balance ratio must be greater than 0" ); assert!( - T::Staking::bonding_duration() < TotalUnbondingPools::::get(), + T::StakingInterface::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / so a slash can be applied to relevant unboding pools. (We assume / the bonding duration > slash deffer duration.", @@ -2125,20 +2149,16 @@ pub mod pallet { impl Pallet { /// Returns the pending rewards for the specified `member_account`. /// - /// In the case of error, `None` is returned. - pub fn pending_rewards(member_account: T::AccountId) -> Option> { + /// In the case of error the function returns balance of zero. 
+ pub fn pending_rewards(member_account: T::AccountId) -> BalanceOf { if let Some(pool_member) = PoolMembers::::get(member_account) { - if let Some((reward_pool, bonded_pool)) = RewardPools::::get(pool_member.pool_id) - .zip(BondedPools::::get(pool_member.pool_id)) - { - let current_reward_counter = reward_pool - .current_reward_counter(pool_member.pool_id, bonded_pool.points) - .ok()?; - return pool_member.pending_rewards(current_reward_counter).ok() + if let Some(reward_pool) = RewardPools::::get(pool_member.pool_id) { + return pool_member + .pending_rewards(reward_pool.last_recorded_reward_counter()) + .unwrap_or_default() } } - - None + BalanceOf::::default() } /// The amount of bond that MUST REMAIN IN BONDED in ALL POOLS. @@ -2148,16 +2168,15 @@ impl Pallet { /// /// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the former /// is coming from the staking pallet and the latter two are configured in this pallet. - pub fn depositor_min_bond() -> BalanceOf { - T::Staking::minimum_nominator_bond() + fn depositor_min_bond() -> BalanceOf { + T::StakingInterface::minimum_bond() .max(MinCreateBond::::get()) .max(MinJoinBond::::get()) - .max(T::Currency::minimum_balance()) } /// Remove everything related to the given bonded pool. /// - /// Metadata and all of the sub-pools are also deleted. All accounts are dusted and the leftover - /// of the reward account is returned to the depositor. + /// All sub-pools are also deleted. All accounts are dusted and the leftover of the reward + /// account is returned to the depositor. 
pub fn dissolve_pool(bonded_pool: BondedPool) { let reward_account = bonded_pool.reward_account(); let bonded_account = bonded_pool.bonded_account(); @@ -2176,7 +2195,7 @@ impl Pallet { debug_assert_eq!(frame_system::Pallet::::consumers(&reward_account), 0); debug_assert_eq!(frame_system::Pallet::::consumers(&bonded_account), 0); debug_assert_eq!( - T::Staking::total_stake(&bonded_account).unwrap_or_default(), + T::StakingInterface::total_stake(&bonded_account).unwrap_or_default(), Zero::zero() ); @@ -2194,9 +2213,6 @@ impl Pallet { T::Currency::make_free_balance_be(&bonded_pool.bonded_account(), Zero::zero()); Self::deposit_event(Event::::Destroyed { pool_id: bonded_pool.id }); - // Remove bonded pool metadata. - Metadata::::remove(bonded_pool.id); - bonded_pool.remove(); } @@ -2316,8 +2332,6 @@ impl Pallet { &bonded_pool.reward_account(), &member_account, pending_rewards, - // defensive: the depositor has put existential deposit into the pool and it stays - // untouched, reward account shall not die. 
ExistenceRequirement::AllowDeath, )?; @@ -2330,76 +2344,6 @@ impl Pallet { Ok(pending_rewards) } - fn do_create( - who: T::AccountId, - amount: BalanceOf, - root: AccountIdLookupOf, - nominator: AccountIdLookupOf, - state_toggler: AccountIdLookupOf, - pool_id: PoolId, - ) -> DispatchResult { - let root = T::Lookup::lookup(root)?; - let nominator = T::Lookup::lookup(nominator)?; - let state_toggler = T::Lookup::lookup(state_toggler)?; - - ensure!(amount >= Pallet::::depositor_min_bond(), Error::::MinimumBondNotMet); - ensure!( - MaxPools::::get().map_or(true, |max_pools| BondedPools::::count() < max_pools), - Error::::MaxPools - ); - ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); - let mut bonded_pool = BondedPool::::new( - pool_id, - PoolRoles { - root: Some(root), - nominator: Some(nominator), - state_toggler: Some(state_toggler), - depositor: who.clone(), - }, - ); - - bonded_pool.try_inc_members()?; - let points = bonded_pool.try_bond_funds(&who, amount, BondType::Create)?; - - T::Currency::transfer( - &who, - &bonded_pool.reward_account(), - T::Currency::minimum_balance(), - ExistenceRequirement::AllowDeath, - )?; - - PoolMembers::::insert( - who.clone(), - PoolMember:: { - pool_id, - points, - last_recorded_reward_counter: Zero::zero(), - unbonding_eras: Default::default(), - }, - ); - RewardPools::::insert( - pool_id, - RewardPool:: { - last_recorded_reward_counter: Zero::zero(), - last_recorded_total_payouts: Zero::zero(), - total_rewards_claimed: Zero::zero(), - }, - ); - ReversePoolIdLookup::::insert(bonded_pool.bonded_account(), pool_id); - - Self::deposit_event(Event::::Created { depositor: who.clone(), pool_id }); - - Self::deposit_event(Event::::Bonded { - member: who, - pool_id, - bonded: amount, - joined: true, - }); - bonded_pool.put(); - - Ok(()) - } - /// Ensure the correctness of the state of this pallet. /// /// This should be valid before or after each state transition of this pallet. 
@@ -2434,9 +2378,9 @@ impl Pallet { /// /// To cater for tests that want to escape parts of these checks, this function is split into /// multiple `level`s, where the higher the level, the more checks we performs. So, - /// `try_state(255)` is the strongest sanity check, and `0` performs no checks. - #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] - pub fn do_try_state(level: u8) -> Result<(), &'static str> { + /// `sanity_check(255)` is the strongest sanity check, and `0` performs no checks. + #[cfg(any(test, debug_assertions))] + pub fn sanity_checks(level: u8) -> Result<(), &'static str> { if level.is_zero() { return Ok(()) } @@ -2453,46 +2397,16 @@ impl Pallet { for id in reward_pools { let account = Self::create_reward_account(id); - assert!( - T::Currency::free_balance(&account) >= T::Currency::minimum_balance(), - "reward pool of {id}: {:?} (ed = {:?})", - T::Currency::free_balance(&account), - T::Currency::minimum_balance() - ); + assert!(T::Currency::free_balance(&account) >= T::Currency::minimum_balance()); } let mut pools_members = BTreeMap::::new(); - let mut pools_members_pending_rewards = BTreeMap::>::new(); let mut all_members = 0u32; PoolMembers::::iter().for_each(|(_, d)| { - let bonded_pool = BondedPools::::get(d.pool_id).unwrap(); + assert!(BondedPools::::contains_key(d.pool_id)); assert!(!d.total_points().is_zero(), "no member should have zero points: {:?}", d); *pools_members.entry(d.pool_id).or_default() += 1; all_members += 1; - - let reward_pool = RewardPools::::get(d.pool_id).unwrap(); - if !bonded_pool.points.is_zero() { - let current_rc = - reward_pool.current_reward_counter(d.pool_id, bonded_pool.points).unwrap(); - *pools_members_pending_rewards.entry(d.pool_id).or_default() += - d.pending_rewards(current_rc).unwrap(); - } // else this pool has been heavily slashed and cannot have any rewards anymore. 
- }); - - RewardPools::::iter_keys().for_each(|id| { - // the sum of the pending rewards must be less than the leftover balance. Since the - // reward math rounds down, we might accumulate some dust here. - log!( - trace, - "pool {:?}, sum pending rewards = {:?}, remaining balance = {:?}", - id, - pools_members_pending_rewards.get(&id), - RewardPool::::current_balance(id) - ); - assert!( - RewardPool::::current_balance(id) >= - pools_members_pending_rewards.get(&id).map(|x| *x).unwrap_or_default() - ) }); BondedPools::::iter().for_each(|(id, inner)| { @@ -2523,7 +2437,8 @@ impl Pallet { let subs = SubPoolsStorage::::get(pool_id).unwrap_or_default(); let sum_unbonding_balance = subs.sum_unbonding_balance(); - let bonded_balance = T::Staking::active_stake(&pool_account).unwrap_or_default(); + let bonded_balance = + T::StakingInterface::active_stake(&pool_account).unwrap_or_default(); let total_balance = T::Currency::total_balance(&pool_account); assert!( @@ -2536,7 +2451,6 @@ impl Pallet { sum_unbonding_balance ); } - Ok(()) } @@ -2550,15 +2464,14 @@ impl Pallet { member: T::AccountId, ) -> DispatchResult { let points = PoolMembers::::get(&member).map(|d| d.active_points()).unwrap_or_default(); - let member_lookup = T::Lookup::unlookup(member); - Self::unbond(origin, member_lookup, points) + Self::unbond(origin, member, points) } } impl OnStakerSlash> for Pallet { fn on_slash( pool_account: &T::AccountId, - // Bonded balance is always read directly from staking, therefore we don't need to update + // Bonded balance is always read directly from staking, therefore we need not update // anything here. 
slashed_bonded: BalanceOf, slashed_unlocking: &BTreeMap>, diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index b73141c95f72c..f2abfe29dfbf7 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -18,7 +18,7 @@ use super::*; use crate::log; use frame_support::traits::OnRuntimeUpgrade; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; +use sp_std::collections::btree_map::BTreeMap; pub mod v1 { use super::*; @@ -97,10 +97,9 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { // new version must be set. assert_eq!(Pallet::::on_chain_storage_version(), 1); - Pallet::::try_state(frame_system::Pallet::::block_number())?; Ok(()) } } @@ -119,7 +118,7 @@ pub mod v2 { ExtBuilder::default().build_and_execute(|| { let join = |x| { Balances::make_free_balance_be(&x, Balances::minimum_balance() + 10); - frame_support::assert_ok!(Pools::join(RuntimeOrigin::signed(x), 10, 1)); + frame_support::assert_ok!(Pools::join(Origin::signed(x), 10, 1)); }; assert_eq!(BondedPool::::get(1).unwrap().points, 10); @@ -245,7 +244,7 @@ pub mod v2 { }, }; - total_value_locked += bonded_pool.points_to_balance(*points); + total_value_locked += bonded_pool.points_to_balance(points.clone()); let portion = Perbill::from_rational(*points, bonded_pool.points); let last_claim = portion * accumulated_reward; @@ -321,7 +320,6 @@ pub mod v2 { current ); current.put::>(); - T::DbWeight::get().reads_writes(members_translated + 1, reward_pools_translated + 1) } } @@ -347,7 +345,7 @@ pub mod v2 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result<(), &'static str> { // all reward accounts must have more than ED. 
RewardPools::::iter().for_each(|(id, _)| { assert!( @@ -356,11 +354,11 @@ pub mod v2 { ) }); - Ok(Vec::new()) + Ok(()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { // new version must be set. assert_eq!(Pallet::::on_chain_storage_version(), 2); @@ -385,67 +383,3 @@ pub mod v2 { } } } - -pub mod v3 { - use super::*; - - /// This migration removes stale bonded-pool metadata, if any. - pub struct MigrateToV3(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV3 { - fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - - log!( - info, - "Running migration with current storage version {:?} / onchain {:?}", - current, - onchain - ); - - if current > onchain { - let mut metadata_iterated = 0u64; - let mut metadata_removed = 0u64; - Metadata::::iter_keys() - .filter(|id| { - metadata_iterated += 1; - !BondedPools::::contains_key(&id) - }) - .collect::>() - .into_iter() - .for_each(|id| { - metadata_removed += 1; - Metadata::::remove(&id); - }); - current.put::>(); - // metadata iterated + bonded pools read + a storage version read - let total_reads = metadata_iterated * 2 + 1; - // metadata removed + a storage version write - let total_writes = metadata_removed + 1; - T::DbWeight::get().reads_writes(total_reads, total_writes) - } else { - log!(info, "MigrateToV3 should be removed"); - T::DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - ensure!( - Pallet::::current_storage_version() > Pallet::::on_chain_storage_version(), - "the on_chain version is equal or more than the current one" - ); - Ok(Vec::new()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { - ensure!( - Metadata::::iter_keys().all(|id| BondedPools::::contains_key(&id)), - "not all of the stale 
metadata has been removed" - ); - ensure!(Pallet::::on_chain_storage_version() == 3, "wrong storage version"); - Ok(()) - } - } -} diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 99d521df3241b..5138c55afccac 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -3,9 +3,7 @@ use crate::{self as pools}; use frame_support::{assert_ok, parameter_types, PalletId}; use frame_system::RawOrigin; use sp_runtime::FixedU128; -use sp_staking::Stake; -pub type BlockNumber = u64; pub type AccountId = u128; pub type Balance = u128; pub type RewardCounter = FixedU128; @@ -48,17 +46,10 @@ impl sp_staking::StakingInterface for StakingMock { type Balance = Balance; type AccountId = AccountId; - fn minimum_nominator_bond() -> Self::Balance { - StakingMinBond::get() - } - fn minimum_validator_bond() -> Self::Balance { + fn minimum_bond() -> Self::Balance { StakingMinBond::get() } - fn desired_validator_count() -> u32 { - unimplemented!("method currently not used in testing") - } - fn current_era() -> EraIndex { CurrentEra::get() } @@ -67,24 +58,39 @@ impl sp_staking::StakingInterface for StakingMock { BondingDuration::get() } - fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { + fn active_stake(who: &Self::AccountId) -> Option { + BondedBalanceMap::get().get(who).map(|v| *v) + } + + fn total_stake(who: &Self::AccountId) -> Option { + match ( + UnbondingBalanceMap::get().get(who).map(|v| *v), + BondedBalanceMap::get().get(who).map(|v| *v), + ) { + (None, None) => None, + (Some(v), None) | (None, Some(v)) => Some(v), + (Some(a), Some(b)) => Some(a + b), + } + } + + fn bond_extra(who: Self::AccountId, extra: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); - x.get_mut(who).map(|v| *v += extra); + x.get_mut(&who).map(|v| *v += extra); BondedBalanceMap::set(&x); Ok(()) } - fn unbond(who: &Self::AccountId, amount: Self::Balance) -> DispatchResult { + fn 
unbond(who: Self::AccountId, amount: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); - *x.get_mut(who).unwrap() = x.get_mut(who).unwrap().saturating_sub(amount); + *x.get_mut(&who).unwrap() = x.get_mut(&who).unwrap().saturating_sub(amount); BondedBalanceMap::set(&x); let mut y = UnbondingBalanceMap::get(); - *y.entry(*who).or_insert(Self::Balance::zero()) += amount; + *y.entry(who).or_insert(Self::Balance::zero()) += amount; UnbondingBalanceMap::set(&y); Ok(()) } - fn chill(_: &Self::AccountId) -> sp_runtime::DispatchResult { + fn chill(_: Self::AccountId) -> sp_runtime::DispatchResult { Ok(()) } @@ -97,12 +103,17 @@ impl sp_staking::StakingInterface for StakingMock { Ok(UnbondingBalanceMap::get().is_empty() && BondedBalanceMap::get().is_empty()) } - fn bond(stash: &Self::AccountId, value: Self::Balance, _: &Self::AccountId) -> DispatchResult { - StakingMock::set_bonded_balance(*stash, value); + fn bond( + stash: Self::AccountId, + _: Self::AccountId, + value: Self::Balance, + _: Self::AccountId, + ) -> DispatchResult { + StakingMock::set_bonded_balance(stash, value); Ok(()) } - fn nominate(_: &Self::AccountId, nominations: Vec) -> DispatchResult { + fn nominate(_: Self::AccountId, nominations: Vec) -> DispatchResult { Nominations::set(&Some(nominations)); Ok(()) } @@ -111,63 +122,21 @@ impl sp_staking::StakingInterface for StakingMock { fn nominations(_: Self::AccountId) -> Option> { Nominations::get() } - - fn stash_by_ctrl(_controller: &Self::AccountId) -> Result { - unimplemented!("method currently not used in testing") - } - - fn stake(who: &Self::AccountId) -> Result, DispatchError> { - match ( - UnbondingBalanceMap::get().get(who).map(|v| *v), - BondedBalanceMap::get().get(who).map(|v| *v), - ) { - (None, None) => Err(DispatchError::Other("balance not found")), - (Some(v), None) => Ok(Stake { total: v, active: 0, stash: *who }), - (None, Some(v)) => Ok(Stake { total: v, active: v, stash: *who }), - (Some(a), Some(b)) => Ok(Stake { 
total: a + b, active: b, stash: *who }), - } - } - - fn election_ongoing() -> bool { - unimplemented!("method currently not used in testing") - } - - fn force_unstake(_who: Self::AccountId) -> sp_runtime::DispatchResult { - unimplemented!("method currently not used in testing") - } - - fn is_exposed_in_era(_who: &Self::AccountId, _era: &EraIndex) -> bool { - unimplemented!("method currently not used in testing") - } - - #[cfg(feature = "runtime-benchmarks")] - fn add_era_stakers( - _current_era: &EraIndex, - _stash: &Self::AccountId, - _exposures: Vec<(Self::AccountId, Self::Balance)>, - ) { - unimplemented!("method currently not used in testing") - } - - #[cfg(feature = "runtime-benchmarks")] - fn set_current_era(_era: EraIndex) { - unimplemented!("method currently not used in testing") - } } impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; - type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type BlockNumber = u64; + type Call = Call; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -191,7 +160,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -219,13 +188,14 @@ parameter_types! 
{ pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); } impl pools::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; type RewardCounter = RewardCounter; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = StakingMock; + type StakingInterface = StakingMock; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type PalletId = PoolsPalletId; type MaxMetadataLen = MaxMetadataLen; @@ -259,45 +229,44 @@ impl Default for ExtBuilder { } } -#[cfg_attr(feature = "fuzzing", allow(dead_code))] impl ExtBuilder { // Add members to pool 0. - pub fn add_members(mut self, members: Vec<(AccountId, Balance)>) -> Self { + pub(crate) fn add_members(mut self, members: Vec<(AccountId, Balance)>) -> Self { self.members = members; self } - pub fn ed(self, ed: Balance) -> Self { + pub(crate) fn ed(self, ed: Balance) -> Self { ExistentialDeposit::set(ed); self } - pub fn min_bond(self, min: Balance) -> Self { + pub(crate) fn min_bond(self, min: Balance) -> Self { StakingMinBond::set(min); self } - pub fn min_join_bond(self, min: Balance) -> Self { + pub(crate) fn min_join_bond(self, min: Balance) -> Self { MinJoinBondConfig::set(min); self } - pub fn with_check(self, level: u8) -> Self { + pub(crate) fn with_check(self, level: u8) -> Self { CheckLevel::set(level); self } - pub fn max_members(mut self, max: Option) -> Self { + pub(crate) fn max_members(mut self, max: Option) -> Self { self.max_members = max; self } - pub fn max_members_per_pool(mut self, max: Option) -> Self { + pub(crate) fn max_members_per_pool(mut self, max: Option) -> Self { self.max_members_per_pool = max; self } - pub fn build(self) -> sp_io::TestExternalities { + pub(crate) fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -321,7 +290,7 @@ impl 
ExtBuilder { let amount_to_bond = Pools::depositor_min_bond(); Balances::make_free_balance_be(&10, amount_to_bond * 5); assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); - assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1])); + let last_pool = LastPoolId::::get(); for (account_id, bonded) in self.members { Balances::make_free_balance_be(&account_id, bonded * 2); @@ -335,12 +304,12 @@ impl ExtBuilder { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { self.build().execute_with(|| { test(); - Pools::do_try_state(CheckLevel::get()).unwrap(); + Pools::sanity_checks(CheckLevel::get()).unwrap(); }) } } -pub fn unsafe_set_state(pool_id: PoolId, state: PoolState) { +pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) { BondedPools::::try_mutate(pool_id, |maybe_bonded_pool| { maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { bonded_pool.state = state; @@ -355,11 +324,11 @@ parameter_types! { } /// All events of this pallet. -pub fn pool_events_since_last_call() -> Vec> { +pub(crate) fn pool_events_since_last_call() -> Vec> { let events = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::Pools(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::Pools(inner) = e { Some(inner) } else { None }) .collect::>(); let already_seen = PoolsEvents::get(); PoolsEvents::set(&(events.len() as u32)); @@ -367,11 +336,11 @@ pub fn pool_events_since_last_call() -> Vec> { } /// All events of the `Balances` pallet. 
-pub fn balances_events_since_last_call() -> Vec> { +pub(crate) fn balances_events_since_last_call() -> Vec> { let events = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::Balances(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::Balances(inner) = e { Some(inner) } else { None }) .collect::>(); let already_seen = BalancesEvents::get(); BalancesEvents::set(&(events.len() as u32)); @@ -383,7 +352,7 @@ pub fn fully_unbond_permissioned(member: AccountId) -> DispatchResult { let points = PoolMembers::::get(&member) .map(|d| d.active_points()) .unwrap_or_default(); - Pools::unbond(RuntimeOrigin::signed(member), member, points) + Pools::unbond(Origin::signed(member), member, points) } #[cfg(test)] diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 7d5d418bbf2c8..5aa8e97266e0d 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -25,7 +25,7 @@ macro_rules! unbonding_pools_with_era { ($($k:expr => $v:expr),* $(,)?) 
=> {{ use sp_std::iter::{Iterator, IntoIterator}; let not_bounded: BTreeMap<_, _> = Iterator::collect(IntoIterator::into_iter([$(($k, $v),)*])); - BoundedBTreeMap::, TotalUnbondingPools>::try_from(not_bounded).unwrap() + UnbondingPoolsWithEra::try_from(not_bounded).unwrap() }}; } @@ -47,7 +47,6 @@ fn test_setup_works() { assert_eq!(SubPoolsStorage::::count(), 0); assert_eq!(PoolMembers::::count(), 1); assert_eq!(StakingMock::bonding_duration(), 3); - assert!(Metadata::::contains_key(1)); let last_pool = LastPoolId::::get(); assert_eq!( @@ -213,30 +212,31 @@ mod bonded_pool { // Simulate a 100% slashed pool StakingMock::set_bonded_balance(pool.bonded_account(), 0); - assert_noop!(pool.ok_to_join(), Error::::OverflowRisk); + assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); // Simulate a slashed pool at `MaxPointsToBalance` + 1 slashed pool StakingMock::set_bonded_balance( pool.bonded_account(), max_points_to_balance.saturating_add(1).into(), ); - assert_ok!(pool.ok_to_join()); + assert_ok!(pool.ok_to_join(0)); // Simulate a slashed pool at `MaxPointsToBalance` StakingMock::set_bonded_balance(pool.bonded_account(), max_points_to_balance); - assert_noop!(pool.ok_to_join(), Error::::OverflowRisk); + assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); StakingMock::set_bonded_balance( pool.bonded_account(), Balance::MAX / max_points_to_balance, ); - + // New bonded balance would be over threshold of Balance type + assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); // and a sanity check StakingMock::set_bonded_balance( pool.bonded_account(), Balance::MAX / max_points_to_balance - 1, ); - assert_ok!(pool.ok_to_join()); + assert_ok!(pool.ok_to_join(0)); }); } } @@ -436,13 +436,13 @@ mod join { roles: DEFAULT_ROLES, }, }; - ExtBuilder::default().with_check(0).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { // Given Balances::make_free_balance_be(&11, ExistentialDeposit::get() + 2); assert!(!PoolMembers::::contains_key(&11)); // When - 
assert_ok!(Pools::join(RuntimeOrigin::signed(11), 2, 1)); + assert_ok!(Pools::join(Origin::signed(11), 2, 1)); // Then @@ -470,7 +470,7 @@ mod join { assert!(!PoolMembers::::contains_key(&12)); // When - assert_ok!(Pools::join(RuntimeOrigin::signed(12), 12, 1)); + assert_ok!(Pools::join(Origin::signed(12), 12, 1)); // Then assert_eq!( @@ -493,21 +493,15 @@ mod join { assert_eq!(PoolMembers::::get(&10).unwrap().pool_id, 1); assert_noop!( - Pools::join(RuntimeOrigin::signed(10), 420, 123), + Pools::join(Origin::signed(10), 420, 123), Error::::AccountBelongsToOtherPool ); - assert_noop!( - Pools::join(RuntimeOrigin::signed(11), 420, 123), - Error::::PoolNotFound - ); + assert_noop!(Pools::join(Origin::signed(11), 420, 123), Error::::PoolNotFound); // Force the pools bonded balance to 0, simulating a 100% slash StakingMock::set_bonded_balance(Pools::create_bonded_account(1), 0); - assert_noop!( - Pools::join(RuntimeOrigin::signed(11), 420, 1), - Error::::OverflowRisk - ); + assert_noop!(Pools::join(Origin::signed(11), 420, 1), Error::::OverflowRisk); // Given a mocked bonded pool BondedPool:: { @@ -532,33 +526,27 @@ mod join { Pools::create_bonded_account(123), max_points_to_balance, ); - assert_noop!( - Pools::join(RuntimeOrigin::signed(11), 420, 123), - Error::::OverflowRisk - ); + assert_noop!(Pools::join(Origin::signed(11), 420, 123), Error::::OverflowRisk); StakingMock::set_bonded_balance( Pools::create_bonded_account(123), Balance::MAX / max_points_to_balance, ); // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` - assert_noop!( - Pools::join(RuntimeOrigin::signed(11), 5, 123), - pallet_balances::Error::::InsufficientBalance, - ); + assert_noop!(Pools::join(Origin::signed(11), 5, 123), Error::::OverflowRisk); StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); // Cannot join a pool that isn't open unsafe_set_state(123, PoolState::Blocked); assert_noop!( - Pools::join(RuntimeOrigin::signed(11), max_points_to_balance, 
123), + Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); unsafe_set_state(123, PoolState::Destroying); assert_noop!( - Pools::join(RuntimeOrigin::signed(11), max_points_to_balance, 123), + Pools::join(Origin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); @@ -567,7 +555,7 @@ mod join { // Then assert_noop!( - Pools::join(RuntimeOrigin::signed(11), 99, 123), + Pools::join(Origin::signed(11), 99, 123), Error::::MinimumBondNotMet ); }); @@ -589,7 +577,7 @@ mod join { }, } .put(); - let _ = Pools::join(RuntimeOrigin::signed(11), 420, 123); + let _ = Pools::join(Origin::signed(11), 420, 123); }); } @@ -602,7 +590,7 @@ mod join { let account = i + 100; Balances::make_free_balance_be(&account, 100 + Balances::minimum_balance()); - assert_ok!(Pools::join(RuntimeOrigin::signed(account), 100, 1)); + assert_ok!(Pools::join(Origin::signed(account), 100, 1)); } Balances::make_free_balance_be(&103, 100 + Balances::minimum_balance()); @@ -619,7 +607,7 @@ mod join { ); assert_noop!( - Pools::join(RuntimeOrigin::signed(103), 100, 1), + Pools::join(Origin::signed(103), 100, 1), Error::::MaxPoolMembers ); @@ -628,7 +616,7 @@ mod join { assert_eq!(MaxPoolMembers::::get(), Some(4)); Balances::make_free_balance_be(&104, 100 + Balances::minimum_balance()); - assert_ok!(Pools::create(RuntimeOrigin::signed(104), 100, 104, 104, 104)); + assert_ok!(Pools::create(Origin::signed(104), 100, 104, 104, 104)); let pool_account = BondedPools::::iter() .find(|(_, bonded_pool)| bonded_pool.roles.depositor == 104) @@ -645,7 +633,7 @@ mod join { ); assert_noop!( - Pools::join(RuntimeOrigin::signed(103), 100, pool_account), + Pools::join(Origin::signed(103), 100, pool_account), Error::::MaxPoolMembers ); }); @@ -703,7 +691,7 @@ mod claim_payout { let _ = pool_events_since_last_call(); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); // Then assert_eq!( @@ -719,7 +707,7 @@ mod 
claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 90); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(40))); + assert_ok!(Pools::claim_payout(Origin::signed(40))); // Then assert_eq!( @@ -732,7 +720,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 50); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(50))); + assert_ok!(Pools::claim_payout(Origin::signed(50))); // Then assert_eq!( @@ -748,7 +736,7 @@ mod claim_payout { assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); // Then assert_eq!( @@ -761,7 +749,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 45); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(40))); + assert_ok!(Pools::claim_payout(Origin::signed(40))); // Then assert_eq!( @@ -778,7 +766,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 75); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(50))); + assert_ok!(Pools::claim_payout(Origin::signed(50))); // Then assert_eq!( @@ -791,7 +779,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); // Then assert_eq!( @@ -808,7 +796,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); // Then assert_eq!( @@ -827,7 +815,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + 
assert_ok!(Pools::claim_payout(Origin::signed(10))); // Then assert_eq!( @@ -840,7 +828,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 398); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(40))); + assert_ok!(Pools::claim_payout(Origin::signed(40))); // Then assert_eq!( @@ -853,7 +841,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 210); // When - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(50))); + assert_ok!(Pools::claim_payout(Origin::signed(50))); // Then assert_eq!( @@ -873,10 +861,7 @@ mod claim_payout { // fully unbond the member. assert_ok!(fully_unbond_permissioned(11)); - assert_noop!( - Pools::claim_payout(RuntimeOrigin::signed(11)), - Error::::FullyUnbonding - ); + assert_noop!(Pools::claim_payout(Origin::signed(11)), Error::::FullyUnbonding); assert_eq!( pool_events_since_last_call(), @@ -1155,14 +1140,14 @@ mod claim_payout { // 20 joins afterwards. Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + assert_ok!(Pools::join(Origin::signed(20), 10, 1)); // reward by another 20 Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); // 10 should claim 10 + 10, 20 should claim 20 / 2. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), vec![ @@ -1177,8 +1162,8 @@ mod claim_payout { // any upcoming rewards are shared equally. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1198,13 +1183,13 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 3).unwrap(); Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + assert_ok!(Pools::join(Origin::signed(20), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 6).unwrap(); // 10 should claim 3, 20 should claim 3 + 3. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1220,8 +1205,8 @@ mod claim_payout { // any upcoming rewards are shared equally. Balances::mutate_account(&default_reward_account(), |f| f.free += 8).unwrap(); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1234,8 +1219,8 @@ mod claim_payout { // uneven upcoming rewards are shared equally, rounded down. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 7).unwrap(); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1255,19 +1240,19 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); Balances::make_free_balance_be(&20, ed + 10); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + assert_ok!(Pools::join(Origin::signed(20), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); Balances::make_free_balance_be(&30, ed + 10); - assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); + assert_ok!(Pools::join(Origin::signed(30), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); // 10 should claim 10, 20 should claim nothing. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1285,9 +1270,9 @@ mod claim_payout { // any upcoming rewards are shared equally. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1300,75 +1285,22 @@ mod claim_payout { }); } - #[test] - fn pending_rewards_per_member_works() { - ExtBuilder::default().build_and_execute(|| { - let ed = Balances::minimum_balance(); - - assert_eq!(Pools::pending_rewards(10), Some(0)); - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); - assert_eq!(Pools::pending_rewards(10), Some(30)); - assert_eq!(Pools::pending_rewards(20), None); - - Balances::make_free_balance_be(&20, ed + 10); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); - - assert_eq!(Pools::pending_rewards(10), Some(30)); - assert_eq!(Pools::pending_rewards(20), Some(0)); - - Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); - - assert_eq!(Pools::pending_rewards(10), Some(30 + 50)); - assert_eq!(Pools::pending_rewards(20), Some(50)); - assert_eq!(Pools::pending_rewards(30), None); - - Balances::make_free_balance_be(&30, ed + 10); - assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); - - assert_eq!(Pools::pending_rewards(10), Some(30 + 50)); - assert_eq!(Pools::pending_rewards(20), Some(50)); - assert_eq!(Pools::pending_rewards(30), Some(0)); - - Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); - - assert_eq!(Pools::pending_rewards(10), Some(30 + 50 + 20)); - assert_eq!(Pools::pending_rewards(20), Some(50 + 20)); - assert_eq!(Pools::pending_rewards(30), Some(20)); - - // 10 should claim 10, 20 should claim nothing. 
- assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_eq!(Pools::pending_rewards(10), Some(0)); - assert_eq!(Pools::pending_rewards(20), Some(50 + 20)); - assert_eq!(Pools::pending_rewards(30), Some(20)); - - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - assert_eq!(Pools::pending_rewards(10), Some(0)); - assert_eq!(Pools::pending_rewards(20), Some(0)); - assert_eq!(Pools::pending_rewards(30), Some(20)); - - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); - assert_eq!(Pools::pending_rewards(10), Some(0)); - assert_eq!(Pools::pending_rewards(20), Some(0)); - assert_eq!(Pools::pending_rewards(30), Some(0)); - }); - } - #[test] fn rewards_distribution_is_fair_bond_extra() { ExtBuilder::default().build_and_execute(|| { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 20); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); Balances::make_free_balance_be(&30, ed + 20); - assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); + assert_ok!(Pools::join(Origin::signed(30), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); // everyone claims. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1384,14 +1316,14 @@ mod claim_payout { ); // 30 now bumps itself to be like 20. - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(30), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(30), BondExtra::FreeBalance(10))); // more rewards come in. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1411,13 +1343,13 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 20); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); // everyone claims. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1431,13 +1363,13 @@ mod claim_payout { ); // 20 unbonds to be equal to 10 (10 points each). - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); // more rewards come in. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1456,16 +1388,16 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 20); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); Balances::make_free_balance_be(&30, ed + 20); - assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); + assert_ok!(Pools::join(Origin::signed(30), 10, 1)); // 10 gets 10, 20 gets 20, 30 gets 10 Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1483,8 +1415,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1498,8 +1430,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); // some claim. 
- assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1510,7 +1442,7 @@ mod claim_payout { ); // now 30 claims all at once - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1525,13 +1457,13 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 200); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); // 10 gets 10, 20 gets 20, 30 gets 10 Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); assert_eq!( pool_events_since_last_call(), @@ -1547,11 +1479,11 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); // and 20 bonds more -- they should not have more share of this reward. - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10))); // everyone claim. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1567,8 +1499,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); // everyone claim. 
- assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1598,7 +1530,7 @@ mod claim_payout { // create pool 2 Balances::make_free_balance_be(&20, 100); - assert_ok!(Pools::create(RuntimeOrigin::signed(20), 10, 20, 20, 20)); + assert_ok!(Pools::create(Origin::signed(20), 10, 20, 20, 20)); // has no impact -- initial let (member_20, _, reward_pool_20) = Pools::get_member_with_pools(&20).unwrap(); @@ -1614,7 +1546,7 @@ mod claim_payout { // create pool 3 Balances::make_free_balance_be(&30, 100); - assert_ok!(Pools::create(RuntimeOrigin::signed(30), 10, 30, 30, 30)); + assert_ok!(Pools::create(Origin::signed(30), 10, 30, 30, 30)); // reward counter is still the same. let (member_30, _, reward_pool_30) = Pools::get_member_with_pools(&30).unwrap(); @@ -1630,7 +1562,7 @@ mod claim_payout { assert_eq!(member_30.last_recorded_reward_counter, 0.into()); // and 30 can claim the reward now. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1654,7 +1586,7 @@ mod claim_payout { MaxPoolMembersPerPool::::set(None); let join = |x, y| { Balances::make_free_balance_be(&x, y + Balances::minimum_balance()); - assert_ok!(Pools::join(RuntimeOrigin::signed(x), y, 1)); + assert_ok!(Pools::join(Origin::signed(x), y, 1)); }; { @@ -1736,10 +1668,7 @@ mod claim_payout { // 10 bonds extra without any rewards. 
{ - assert_ok!(Pools::bond_extra( - RuntimeOrigin::signed(10), - BondExtra::FreeBalance(10) - )); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); assert_eq!(member.last_recorded_reward_counter, 0.into()); assert_eq!(reward_pool.last_recorded_total_payouts, 0); @@ -1751,10 +1680,7 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); { - assert_ok!(Pools::bond_extra( - RuntimeOrigin::signed(10), - BondExtra::FreeBalance(10) - )); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); // explanation: before bond_extra takes place, there is 40 points and 30 balance in // the system, RewardCounter is therefore 7.5 @@ -1768,10 +1694,7 @@ mod claim_payout { // 20 bonds extra again, without further rewards. { - assert_ok!(Pools::bond_extra( - RuntimeOrigin::signed(20), - BondExtra::FreeBalance(10) - )); + assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10))); let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap(); assert_eq!(member.last_recorded_reward_counter, RewardCounter::from_float(0.75)); assert_eq!( @@ -1797,54 +1720,6 @@ mod claim_payout { }) } - #[test] - fn bond_extra_pending_rewards_works() { - ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| { - MaxPoolMembers::::set(None); - MaxPoolMembersPerPool::::set(None); - - // pool receives some rewards. - Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); - System::reset_events(); - - // 10 cashes it out, and bonds it. - { - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); - // there is 30 points and 30 reward points in the system RC is 1. 
- assert_eq!(member.last_recorded_reward_counter, 1.into()); - assert_eq!(reward_pool.total_rewards_claimed, 10); - // these two are not updated -- only updated when the points change. - assert_eq!(reward_pool.last_recorded_total_payouts, 0); - assert_eq!(reward_pool.last_recorded_reward_counter, 0.into()); - - assert_eq!( - pool_events_since_last_call(), - vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }] - ); - } - - // 20 re-bonds it. - { - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::Rewards)); - let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); - assert_eq!(member.last_recorded_reward_counter, 1.into()); - assert_eq!(reward_pool.total_rewards_claimed, 30); - // since points change, these two are updated. - assert_eq!(reward_pool.last_recorded_total_payouts, 30); - assert_eq!(reward_pool.last_recorded_reward_counter, 1.into()); - - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::PaidOut { member: 20, pool_id: 1, payout: 20 }, - Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: false } - ] - ); - } - }) - } - #[test] fn unbond_updates_recorded_data() { ExtBuilder::default() @@ -1866,7 +1741,7 @@ mod claim_payout { // 20 unbonds without any rewards. { - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap(); assert_eq!(member.last_recorded_reward_counter, 0.into()); assert_eq!(reward_pool.last_recorded_total_payouts, 0); @@ -1878,7 +1753,7 @@ mod claim_payout { // and 30 also unbonds half. { - assert_ok!(Pools::unbond(RuntimeOrigin::signed(30), 30, 10)); + assert_ok!(Pools::unbond(Origin::signed(30), 30, 10)); let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap(); // 30 reward in the system, and 40 points before this unbond to collect it, // RewardCounter is 3/4. 
@@ -1895,7 +1770,7 @@ mod claim_payout { // 30 unbonds again, not change this time. { - assert_ok!(Pools::unbond(RuntimeOrigin::signed(30), 30, 5)); + assert_ok!(Pools::unbond(Origin::signed(30), 30, 5)); let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap(); assert_eq!( member.last_recorded_reward_counter, @@ -1910,7 +1785,7 @@ mod claim_payout { // 20 unbonds again, not change this time, just collecting their reward. { - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap(); assert_eq!( member.last_recorded_reward_counter, @@ -1924,7 +1799,7 @@ mod claim_payout { } // trigger 10's reward as well to see all of the payouts. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); assert_eq!( pool_events_since_last_call(), @@ -1960,8 +1835,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); // everyone claims - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); // some dust (1) remains in the reward account. assert_eq!( @@ -1976,15 +1851,15 @@ mod claim_payout { ); // start dismantling the pool. 
- assert_ok!(Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(Origin::signed(902), 1, PoolState::Destroying)); assert_ok!(fully_unbond_permissioned(20)); CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); assert_ok!(fully_unbond_permissioned(10)); CurrentEra::set(6); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), @@ -2000,7 +1875,6 @@ mod claim_payout { ] ); - assert!(!Metadata::::contains_key(1)); // original ed + ed put into reward account + reward + bond + dust. assert_eq!(Balances::free_balance(&10), 35 + 5 + 13 + 10 + 1); }) @@ -2023,10 +1897,10 @@ mod claim_payout { .unwrap(); // everyone claims - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(21))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(22))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(21))); + assert_ok!(Pools::claim_payout(Origin::signed(22))); assert_eq!( pool_events_since_last_call(), @@ -2080,18 +1954,18 @@ mod unbond { .add_members(vec![(20, 20)]) .build_and_execute(|| { // can unbond to above limit - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 5); // cannot go to below 10: assert_noop!( - Pools::unbond(RuntimeOrigin::signed(20), 20, 10), + Pools::unbond(Origin::signed(20), 20, 10), Error::::MinimumBondNotMet ); // but can go to 0 - 
assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 15)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 15)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); }) @@ -2112,23 +1986,23 @@ mod unbond { // cannot be kicked to above the limit. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(kicker), 20, 5), + Pools::unbond(Origin::signed(kicker), 20, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // cannot go to below 10: assert_noop!( - Pools::unbond(RuntimeOrigin::signed(kicker), 20, 15), + Pools::unbond(Origin::signed(kicker), 20, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // but they themselves can do an unbond - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2); // can be kicked to 0. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(kicker), 20, 18)); + assert_ok!(Pools::unbond(Origin::signed(kicker), 20, 18)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); }) @@ -2149,23 +2023,23 @@ mod unbond { // cannot be kicked to above the limit. 
assert_noop!( - Pools::unbond(RuntimeOrigin::signed(random), 20, 5), + Pools::unbond(Origin::signed(random), 20, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // cannot go to below 10: assert_noop!( - Pools::unbond(RuntimeOrigin::signed(random), 20, 15), + Pools::unbond(Origin::signed(random), 20, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // but they themselves can do an unbond - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2); // but can go to 0 - assert_ok!(Pools::unbond(RuntimeOrigin::signed(random), 20, 18)); + assert_ok!(Pools::unbond(Origin::signed(random), 20, 18)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); }) @@ -2178,25 +2052,19 @@ mod unbond { // - depositor cannot unbond to below limit or 0 ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // can unbond to above the limit. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); // cannot go to below 10: - assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 10), - Error::::MinimumBondNotMet - ); + assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet); // cannot go to 0 either. 
- assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 15), - Error::::MinimumBondNotMet - ); + assert_noop!(Pools::unbond(Origin::signed(10), 10, 15), Error::::MinimumBondNotMet); }) } @@ -2206,7 +2074,7 @@ mod unbond { // - depositor can never be kicked. ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage @@ -2215,27 +2083,24 @@ mod unbond { // cannot be kicked to above limit. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(kicker), 10, 5), + Pools::unbond(Origin::signed(kicker), 10, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // or below the limit assert_noop!( - Pools::unbond(RuntimeOrigin::signed(kicker), 10, 15), + Pools::unbond(Origin::signed(kicker), 10, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // or 0. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(kicker), 10, 20), + Pools::unbond(Origin::signed(kicker), 10, 20), Error::::DoesNotHavePermission ); // they themselves cannot do it either - assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 20), - Error::::MinimumBondNotMet - ); + assert_noop!(Pools::unbond(Origin::signed(10), 10, 20), Error::::MinimumBondNotMet); }) } @@ -2244,7 +2109,7 @@ mod unbond { // depositor can never be permissionlessly unbonded. ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage @@ -2253,24 +2118,24 @@ mod unbond { // cannot be kicked to above limit. 
assert_noop!( - Pools::unbond(RuntimeOrigin::signed(random), 10, 5), + Pools::unbond(Origin::signed(random), 10, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // or below the limit assert_noop!( - Pools::unbond(RuntimeOrigin::signed(random), 10, 15), + Pools::unbond(Origin::signed(random), 10, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // or 0. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(random), 10, 20), + Pools::unbond(Origin::signed(random), 10, 20), Error::::DoesNotHavePermission ); // they themselves can do it in this case though. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 20)); }) } @@ -2283,29 +2148,26 @@ mod unbond { .add_members(vec![(20, 20)]) .build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra( - RuntimeOrigin::signed(10), - BondExtra::FreeBalance(10) - )); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage unsafe_set_state(1, PoolState::Destroying); // can go above the limit - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); // but not below the limit assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 10), + Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet ); // and certainly not zero assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 15), + Pools::unbond(Origin::signed(10), 10, 15), Error::::MinimumBondNotMet ); }) @@ -2319,25 +2181,22 @@ mod unbond { // - depositor can unbond to 0 if last and destroying. ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. 
- assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage unsafe_set_state(1, PoolState::Destroying); // can unbond to above the limit. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); // still cannot go to below limit - assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 10), - Error::::MinimumBondNotMet - ); + assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet); // can go to 0 too. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 15)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 15)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 20); }) @@ -2464,8 +2323,8 @@ mod unbond { // When CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 40, 0)); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 550, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 40, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 550, 0)); assert_ok!(fully_unbond_permissioned(10)); // Then @@ -2563,12 +2422,12 @@ mod unbond { // When the nominator tries to kick, then its a noop assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(901), 100), + Pools::fully_unbond(Origin::signed(901), 100), Error::::NotKickerOrDestroying ); // When the root kicks then its ok - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(900), 100)); + assert_ok!(Pools::fully_unbond(Origin::signed(900), 100)); assert_eq!( pool_events_since_last_call(), @@ -2588,7 +2447,7 @@ mod unbond { ); // When the state toggler kicks then its ok 
- assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(902), 200)); + assert_ok!(Pools::fully_unbond(Origin::signed(902), 200)); assert_eq!( pool_events_since_last_call(), @@ -2639,13 +2498,13 @@ mod unbond { // A permissionless unbond attempt errors assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(420), 100), + Pools::fully_unbond(Origin::signed(420), 100), Error::::NotKickerOrDestroying ); // permissionless unbond must be full assert_noop!( - Pools::unbond(RuntimeOrigin::signed(420), 100, 80), + Pools::unbond(Origin::signed(420), 100, 80), Error::::PartialUnbondNotAllowedPermissionlessly, ); @@ -2654,12 +2513,12 @@ mod unbond { // The depositor cannot be fully unbonded until they are the last member assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(10), 10), + Pools::fully_unbond(Origin::signed(10), 10), Error::::MinimumBondNotMet, ); // Any account can unbond a member that is not the depositor - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(420), 100)); + assert_ok!(Pools::fully_unbond(Origin::signed(420), 100)); assert_eq!( pool_events_since_last_call(), @@ -2673,7 +2532,7 @@ mod unbond { // still permissionless unbond must be full assert_noop!( - Pools::unbond(RuntimeOrigin::signed(420), 100, 80), + Pools::unbond(Origin::signed(420), 100, 80), Error::::PartialUnbondNotAllowedPermissionlessly, ); @@ -2682,7 +2541,7 @@ mod unbond { // The depositor cannot be unbonded assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(420), 10), + Pools::fully_unbond(Origin::signed(420), 10), Error::::DoesNotHavePermission ); @@ -2691,27 +2550,27 @@ mod unbond { // The depositor cannot be unbonded yet. assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(420), 10), + Pools::fully_unbond(Origin::signed(420), 10), Error::::DoesNotHavePermission, ); // but when everyone is unbonded it can.. 
CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 100, 0)); // still permissionless unbond must be full. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(420), 10, 5), + Pools::unbond(Origin::signed(420), 10, 5), Error::::PartialUnbondNotAllowedPermissionlessly, ); // depositor can never be unbonded permissionlessly . assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(420), 10), + Pools::fully_unbond(Origin::signed(420), 10), Error::::DoesNotHavePermission ); // but depositor itself can do it. - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(10), 10)); + assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); assert_eq!(BondedPools::::get(1).unwrap().points, 0); assert_eq!( @@ -2734,7 +2593,7 @@ mod unbond { fn unbond_errors_correctly() { ExtBuilder::default().build_and_execute(|| { assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(11), 11), + Pools::fully_unbond(Origin::signed(11), 11), Error::::PoolMemberNotFound ); @@ -2742,7 +2601,7 @@ mod unbond { let member = PoolMember { pool_id: 2, points: 10, ..Default::default() }; PoolMembers::::insert(11, member); - let _ = Pools::fully_unbond(RuntimeOrigin::signed(11), 11); + let _ = Pools::fully_unbond(Origin::signed(11), 11); }); } @@ -2764,13 +2623,13 @@ mod unbond { } .put(); - let _ = Pools::fully_unbond(RuntimeOrigin::signed(11), 11); + let _ = Pools::fully_unbond(Origin::signed(11), 11); }); } #[test] fn partial_unbond_era_tracking() { - ExtBuilder::default().ed(1).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { // to make the depositor capable of withdrawing. 
StakingMinBond::set(1); MinCreateBond::::set(1); @@ -2794,7 +2653,7 @@ mod unbond { unsafe_set_state(1, PoolState::Destroying); // when: casual unbond - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 9); @@ -2823,7 +2682,7 @@ mod unbond { ); // when: casual further unbond, same era. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 4); @@ -2849,7 +2708,7 @@ mod unbond { // when: casual further unbond, next era. CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 3); @@ -2877,14 +2736,14 @@ mod unbond { // when: unbonding more than our active: error assert_noop!( frame_support::storage::with_storage_layer(|| Pools::unbond( - RuntimeOrigin::signed(10), + Origin::signed(10), 10, 5 )), Error::::MinimumBondNotMet ); // instead: - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 0); @@ -2917,9 +2776,9 @@ mod unbond { MaxUnbonding::set(2); // given - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 3)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); assert_eq!( PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3) @@ -2929,7 +2788,7 @@ mod unbond { CurrentEra::set(2); assert_noop!( frame_support::storage::with_storage_layer(|| Pools::unbond( - RuntimeOrigin::signed(20), + Origin::signed(20), 20, 4 )), @@ -2938,7 +2797,7 @@ mod 
unbond { // when MaxUnbonding::set(3); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 1)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 1)); assert_eq!( PoolMembers::::get(20).unwrap().unbonding_eras, @@ -2971,13 +2830,13 @@ mod unbond { assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 0); // can unbond a bit.. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 7); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 3); // but not less than 2 assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 6), + Pools::unbond(Origin::signed(10), 10, 6), Error::::MinimumBondNotMet ); @@ -3005,7 +2864,7 @@ mod unbond { // cannot unbond even 7, because the value of shares is now less. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 7), + Pools::unbond(Origin::signed(10), 10, 7), Error::::MinimumBondNotMet ); }); @@ -3023,7 +2882,7 @@ mod unbond { 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3042,7 +2901,7 @@ mod unbond { 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 3)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3058,7 +2917,7 @@ mod unbond { 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3082,18 +2941,18 @@ mod pool_withdraw_unbonded { fn pool_withdraw_unbonded_works() { ExtBuilder::default().build_and_execute(|| { // Given 10 unbond'ed directly against the pool account - assert_ok!(StakingMock::unbond(&default_bonded_account(), 5)); + 
assert_ok!(StakingMock::unbond(default_bonded_account(), 5)); // and the pool account only has 10 balance - assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(5)); - assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(10)); + assert_eq!(StakingMock::active_stake(&default_bonded_account()), Some(5)); + assert_eq!(StakingMock::total_stake(&default_bonded_account()), Some(10)); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); // When - assert_ok!(Pools::pool_withdraw_unbonded(RuntimeOrigin::signed(10), 1, 0)); + assert_ok!(Pools::pool_withdraw_unbonded(Origin::signed(10), 1, 0)); // Then there unbonding balance is no longer locked - assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(5)); - assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(5)); + assert_eq!(StakingMock::active_stake(&default_bonded_account()), Some(5)); + assert_eq!(StakingMock::total_stake(&default_bonded_account()), Some(5)); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); }); } @@ -3113,8 +2972,8 @@ mod withdraw_unbonded { // Given assert_eq!(StakingMock::bonding_duration(), 3); - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(550), 550)); - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(40), 40)); + assert_ok!(Pools::fully_unbond(Origin::signed(550), 550)); + assert_ok!(Pools::fully_unbond(Origin::signed(40), 40)); assert_eq!(Balances::free_balance(&default_bonded_account()), 600); let mut current_era = 1; @@ -3189,7 +3048,7 @@ mod withdraw_unbonded { ); // When - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(550), 550, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(550), 550, 0)); // Then assert_eq!( @@ -3209,7 +3068,7 @@ mod withdraw_unbonded { ); // When - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(40), 40, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(40), 40, 0)); // Then assert_eq!( @@ -3237,7 +3096,7 @@ mod withdraw_unbonded { 
CurrentEra::set(current_era); // when - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3247,7 +3106,6 @@ mod withdraw_unbonded { Event::Destroyed { pool_id: 1 } ] ); - assert!(!Metadata::::contains_key(1)); assert_eq!( balances_events_since_last_call(), vec![ @@ -3269,7 +3127,7 @@ mod withdraw_unbonded { // current bond is 600, we slash it all to 300. StakingMock::set_bonded_balance(default_bonded_account(), 300); Balances::make_free_balance_be(&default_bonded_account(), 300); - assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(300)); + assert_eq!(StakingMock::total_stake(&default_bonded_account()), Some(300)); assert_ok!(fully_unbond_permissioned(40)); assert_ok!(fully_unbond_permissioned(550)); @@ -3309,7 +3167,7 @@ mod withdraw_unbonded { CurrentEra::set(StakingMock::bonding_duration()); // When - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(40), 40, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(40), 40, 0)); // Then assert_eq!( @@ -3330,7 +3188,7 @@ mod withdraw_unbonded { ); // When - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(550), 550, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(550), 550, 0)); // Then assert_eq!( @@ -3358,12 +3216,8 @@ mod withdraw_unbonded { CurrentEra::set(CurrentEra::get() + 3); - // set metadata to check that it's being removed on dissolve - assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1])); - assert!(Metadata::::contains_key(1)); - // when - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // then assert_eq!(Balances::free_balance(&10), 10 + 35); @@ -3380,7 +3234,6 @@ mod withdraw_unbonded { Event::Destroyed { pool_id: 1 } ] ); - assert!(!Metadata::::contains_key(1)); assert_eq!( 
balances_events_since_last_call(), vec![ @@ -3399,7 +3252,7 @@ mod withdraw_unbonded { assert_eq!(Balances::free_balance(&10), 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); unsafe_set_state(1, PoolState::Destroying); - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(10), 10)); + assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); // Simulate a slash that is not accounted for in the sub pools. Balances::make_free_balance_be(&default_bonded_account(), 5); @@ -3412,7 +3265,7 @@ mod withdraw_unbonded { CurrentEra::set(0 + 3); // When - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // Then assert_eq!(Balances::free_balance(10), 10 + 35); @@ -3431,7 +3284,7 @@ mod withdraw_unbonded { SubPoolsStorage::::insert(1, sub_pools.clone()); assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), + Pools::withdraw_unbonded(Origin::signed(11), 11, 0), Error::::PoolMemberNotFound ); @@ -3444,7 +3297,7 @@ mod withdraw_unbonded { // We are still in the bonding duration assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), + Pools::withdraw_unbonded(Origin::signed(11), 11, 0), Error::::CannotWithdrawAny ); @@ -3461,8 +3314,8 @@ mod withdraw_unbonded { .add_members(vec![(100, 100), (200, 200)]) .build_and_execute(|| { // Given - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(100), 100)); - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(200), 200)); + assert_ok!(Pools::fully_unbond(Origin::signed(100), 100)); + assert_ok!(Pools::fully_unbond(Origin::signed(200), 200)); assert_eq!( BondedPool::::get(1).unwrap(), BondedPool { @@ -3479,7 +3332,7 @@ mod withdraw_unbonded { // Cannot kick when pool is open assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(902), 100, 0), + Pools::withdraw_unbonded(Origin::signed(902), 100, 0), Error::::NotKickerOrDestroying ); assert_eq!( @@ -3511,15 +3364,15 @@ mod 
withdraw_unbonded { // Cannot kick as a nominator assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(901), 100, 0), + Pools::withdraw_unbonded(Origin::signed(901), 100, 0), Error::::NotKickerOrDestroying ); // Can kick as root - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(900), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(900), 100, 0)); // Can kick as state toggler - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(900), 200, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(900), 200, 0)); assert_eq!(Balances::free_balance(100), 100 + 100); assert_eq!(Balances::free_balance(200), 200 + 200); @@ -3542,7 +3395,7 @@ mod withdraw_unbonded { fn withdraw_unbonded_destroying_permissionless() { ExtBuilder::default().add_members(vec![(100, 100)]).build_and_execute(|| { // Given - assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(100), 100)); + assert_ok!(Pools::fully_unbond(Origin::signed(100), 100)); assert_eq!( BondedPool::::get(1).unwrap(), BondedPool { @@ -3560,7 +3413,7 @@ mod withdraw_unbonded { // Cannot permissionlessly withdraw assert_noop!( - Pools::fully_unbond(RuntimeOrigin::signed(420), 100), + Pools::fully_unbond(Origin::signed(420), 100), Error::::NotKickerOrDestroying ); @@ -3568,7 +3421,7 @@ mod withdraw_unbonded { unsafe_set_state(1, PoolState::Destroying); // Can permissionlesly withdraw a member that is not the depositor - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(420), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(420), 100, 0)); assert_eq!(SubPoolsStorage::::get(1).unwrap(), Default::default(),); assert_eq!(Balances::free_balance(100), 100 + 100); @@ -3590,13 +3443,13 @@ mod withdraw_unbonded { #[test] fn partial_withdraw_unbonded_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), 
BondExtra::FreeBalance(10))); unsafe_set_state(1, PoolState::Destroying); // given - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 6)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 6)); CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, member_unbonding_eras!(3 => 6, 4 => 1) @@ -3627,13 +3480,13 @@ mod withdraw_unbonded { // when CurrentEra::set(2); assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0), + Pools::withdraw_unbonded(Origin::signed(10), 10, 0), Error::::CannotWithdrawAny ); // when CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // then assert_eq!( @@ -3656,7 +3509,7 @@ mod withdraw_unbonded { // when CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); // then assert_eq!( @@ -3671,7 +3524,7 @@ mod withdraw_unbonded { // when repeating: assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0), + Pools::withdraw_unbonded(Origin::signed(10), 10, 0), Error::::CannotWithdrawAny ); }); @@ -3681,9 +3534,9 @@ mod withdraw_unbonded { fn partial_withdraw_unbonded_non_depositor() { ExtBuilder::default().add_members(vec![(11, 10)]).build_and_execute(|| { // given - assert_ok!(Pools::unbond(RuntimeOrigin::signed(11), 11, 6)); + assert_ok!(Pools::unbond(Origin::signed(11), 11, 6)); CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(11), 11, 1)); + assert_ok!(Pools::unbond(Origin::signed(11), 11, 1)); assert_eq!( PoolMembers::::get(11).unwrap().unbonding_eras, member_unbonding_eras!(3 => 6, 4 => 1) @@ -3714,13 +3567,13 @@ mod withdraw_unbonded { // when CurrentEra::set(2); assert_noop!( - 
Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), + Pools::withdraw_unbonded(Origin::signed(11), 11, 0), Error::::CannotWithdrawAny ); // when CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(11), 11, 0)); // then assert_eq!( @@ -3743,7 +3596,7 @@ mod withdraw_unbonded { // when CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(11), 11, 0)); // then assert_eq!( @@ -3758,7 +3611,7 @@ mod withdraw_unbonded { // when repeating: assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), + Pools::withdraw_unbonded(Origin::signed(11), 11, 0), Error::::CannotWithdrawAny ); }); @@ -3768,7 +3621,7 @@ mod withdraw_unbonded { fn full_multi_step_withdrawing_non_depositor() { ExtBuilder::default().add_members(vec![(100, 100)]).build_and_execute(|| { // given - assert_ok!(Pools::unbond(RuntimeOrigin::signed(100), 100, 75)); + assert_ok!(Pools::unbond(Origin::signed(100), 100, 75)); assert_eq!( PoolMembers::::get(100).unwrap().unbonding_eras, member_unbonding_eras!(3 => 75) @@ -3776,20 +3629,20 @@ mod withdraw_unbonded { // progress one era and unbond the leftover. CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(100), 100, 25)); + assert_ok!(Pools::unbond(Origin::signed(100), 100, 25)); assert_eq!( PoolMembers::::get(100).unwrap().unbonding_eras, member_unbonding_eras!(3 => 75, 4 => 25) ); assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(100), 100, 0), + Pools::withdraw_unbonded(Origin::signed(100), 100, 0), Error::::CannotWithdrawAny ); // now the 75 should be free. 
CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(100), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(100), 100, 0)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3808,7 +3661,7 @@ mod withdraw_unbonded { // the 25 should be free now, and the member removed. CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(100), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(100), 100, 0)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3819,183 +3672,19 @@ mod withdraw_unbonded { }) } - #[test] - fn out_of_sync_unbonding_chunks() { - // the unbonding_eras in pool member are always fixed to the era at which they are unlocked, - // but the actual unbonding pools get pruned and might get combined in the no_era pool. - // Pools are only merged when one unbonds, so we unbond a little bit on every era to - // simulate this. - ExtBuilder::default() - .add_members(vec![(20, 100), (30, 100)]) - .build_and_execute(|| { - System::reset_events(); - - // when - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(30), 30, 5)); - - // then member-local unbonding is pretty much in sync with the global pools. - assert_eq!( - PoolMembers::::get(20).unwrap().unbonding_eras, - member_unbonding_eras!(3 => 5) - ); - assert_eq!( - PoolMembers::::get(30).unwrap().unbonding_eras, - member_unbonding_eras!(3 => 5) - ); - assert_eq!( - SubPoolsStorage::::get(1).unwrap(), - SubPools { - no_era: Default::default(), - with_era: unbonding_pools_with_era! 
{ - 3 => UnbondPool { points: 10, balance: 10 } - } - } - ); - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 3 }, - Event::Unbonded { member: 30, pool_id: 1, points: 5, balance: 5, era: 3 }, - ] - ); - - // when - CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); - - // then still member-local unbonding is pretty much in sync with the global pools. - assert_eq!( - PoolMembers::::get(20).unwrap().unbonding_eras, - member_unbonding_eras!(3 => 5, 4 => 5) - ); - assert_eq!( - SubPoolsStorage::::get(1).unwrap(), - SubPools { - no_era: Default::default(), - with_era: unbonding_pools_with_era! { - 3 => UnbondPool { points: 10, balance: 10 }, - 4 => UnbondPool { points: 5, balance: 5 } - } - } - ); - assert_eq!( - pool_events_since_last_call(), - vec![Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 4 }] - ); - - // when - CurrentEra::set(2); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); - - // then still member-local unbonding is pretty much in sync with the global pools. - assert_eq!( - PoolMembers::::get(20).unwrap().unbonding_eras, - member_unbonding_eras!(3 => 5, 4 => 5, 5 => 5) - ); - assert_eq!( - SubPoolsStorage::::get(1).unwrap(), - SubPools { - no_era: Default::default(), - with_era: unbonding_pools_with_era! { - 3 => UnbondPool { points: 10, balance: 10 }, - 4 => UnbondPool { points: 5, balance: 5 }, - 5 => UnbondPool { points: 5, balance: 5 } - } - } - ); - assert_eq!( - pool_events_since_last_call(), - vec![Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 5 }] - ); - - // when - CurrentEra::set(5); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); - - // then - assert_eq!( - PoolMembers::::get(20).unwrap().unbonding_eras, - member_unbonding_eras!(3 => 5, 4 => 5, 5 => 5, 8 => 5) - ); - assert_eq!( - SubPoolsStorage::::get(1).unwrap(), - SubPools { - // era 3 is merged into no_era. 
- no_era: UnbondPool { points: 10, balance: 10 }, - with_era: unbonding_pools_with_era! { - 4 => UnbondPool { points: 5, balance: 5 }, - 5 => UnbondPool { points: 5, balance: 5 }, - 8 => UnbondPool { points: 5, balance: 5 } - } - } - ); - assert_eq!( - pool_events_since_last_call(), - vec![Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 8 }] - ); - - // now we start withdrawing unlocked bonds. - - // when - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); - // then - assert_eq!( - PoolMembers::::get(20).unwrap().unbonding_eras, - member_unbonding_eras!(8 => 5) - ); - assert_eq!( - SubPoolsStorage::::get(1).unwrap(), - SubPools { - // era 3 is merged into no_era. - no_era: UnbondPool { points: 5, balance: 5 }, - with_era: unbonding_pools_with_era! { - 8 => UnbondPool { points: 5, balance: 5 } - } - } - ); - assert_eq!( - pool_events_since_last_call(), - vec![Event::Withdrawn { member: 20, pool_id: 1, points: 15, balance: 15 }] - ); - - // when - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(30), 30, 0)); - // then - assert_eq!( - PoolMembers::::get(30).unwrap().unbonding_eras, - member_unbonding_eras!() - ); - assert_eq!( - SubPoolsStorage::::get(1).unwrap(), - SubPools { - // era 3 is merged into no_era. - no_era: Default::default(), - with_era: unbonding_pools_with_era! { - 8 => UnbondPool { points: 5, balance: 5 } - } - } - ); - assert_eq!( - pool_events_since_last_call(), - vec![Event::Withdrawn { member: 30, pool_id: 1, points: 5, balance: 5 }] - ); - }) - } - #[test] fn full_multi_step_withdrawing_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { // depositor now has 20, they can unbond to 10. assert_eq!(Pools::depositor_min_bond(), 10); - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); // now they can. 
- assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 7)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 7)); // progress one era and unbond the leftover. CurrentEra::set(1); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, @@ -4004,27 +3693,27 @@ mod withdraw_unbonded { // they can't unbond to a value below 10 other than 0.. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 5), + Pools::unbond(Origin::signed(10), 10, 5), Error::::MinimumBondNotMet ); // but not even full, because they pool is not yet destroying. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 10), + Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet ); // but now they can. unsafe_set_state(1, PoolState::Destroying); assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 5), + Pools::unbond(Origin::signed(10), 10, 5), Error::::MinimumBondNotMet ); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); // now the 7 should be free. CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), @@ -4045,7 +3734,7 @@ mod withdraw_unbonded { // the 13 should be free now, and the member removed. 
CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), @@ -4055,7 +3744,6 @@ mod withdraw_unbonded { Event::Destroyed { pool_id: 1 }, ] ); - assert!(!Metadata::::contains_key(1)); }) } } @@ -4073,15 +3761,12 @@ mod create { assert!(!BondedPools::::contains_key(2)); assert!(!RewardPools::::contains_key(2)); assert!(!PoolMembers::::contains_key(11)); - assert_err!( - StakingMock::active_stake(&next_pool_stash), - DispatchError::Other("balance not found") - ); + assert_eq!(StakingMock::active_stake(&next_pool_stash), None); - Balances::make_free_balance_be(&11, StakingMock::minimum_nominator_bond() + ed); + Balances::make_free_balance_be(&11, StakingMock::minimum_bond() + ed); assert_ok!(Pools::create( - RuntimeOrigin::signed(11), - StakingMock::minimum_nominator_bond(), + Origin::signed(11), + StakingMock::minimum_bond(), 123, 456, 789 @@ -4092,7 +3777,7 @@ mod create { PoolMembers::::get(11).unwrap(), PoolMember { pool_id: 2, - points: StakingMock::minimum_nominator_bond(), + points: StakingMock::minimum_bond(), ..Default::default() } ); @@ -4101,7 +3786,7 @@ mod create { BondedPool { id: 2, inner: BondedPoolInner { - points: StakingMock::minimum_nominator_bond(), + points: StakingMock::minimum_bond(), member_counter: 1, state: PoolState::Open, roles: PoolRoles { @@ -4115,7 +3800,7 @@ mod create { ); assert_eq!( StakingMock::active_stake(&next_pool_stash).unwrap(), - StakingMock::minimum_nominator_bond() + StakingMock::minimum_bond() ); assert_eq!( RewardPools::::get(2).unwrap(), @@ -4138,17 +3823,17 @@ mod create { fn create_errors_correctly() { ExtBuilder::default().with_check(0).build_and_execute(|| { assert_noop!( - Pools::create(RuntimeOrigin::signed(10), 420, 123, 456, 789), + Pools::create(Origin::signed(10), 420, 123, 456, 789), Error::::AccountBelongsToOtherPool ); // Given assert_eq!(MinCreateBond::::get(), 
2); - assert_eq!(StakingMock::minimum_nominator_bond(), 10); + assert_eq!(StakingMock::minimum_bond(), 10); // Then assert_noop!( - Pools::create(RuntimeOrigin::signed(11), 9, 123, 456, 789), + Pools::create(Origin::signed(11), 9, 123, 456, 789), Error::::MinimumBondNotMet ); @@ -4157,7 +3842,7 @@ mod create { // Then assert_noop!( - Pools::create(RuntimeOrigin::signed(11), 19, 123, 456, 789), + Pools::create(Origin::signed(11), 19, 123, 456, 789), Error::::MinimumBondNotMet ); @@ -4177,7 +3862,7 @@ mod create { // Then assert_noop!( - Pools::create(RuntimeOrigin::signed(11), 20, 123, 456, 789), + Pools::create(Origin::signed(11), 20, 123, 456, 789), Error::::MaxPools ); @@ -4188,54 +3873,13 @@ mod create { Balances::make_free_balance_be(&11, 5 + 20); // Then - let create = RuntimeCall::Pools(crate::Call::::create { + let create = Call::Pools(crate::Call::::create { amount: 20, root: 11, nominator: 11, state_toggler: 11, }); - assert_noop!( - create.dispatch(RuntimeOrigin::signed(11)), - Error::::MaxPoolMembers - ); - }); - } - - #[test] - fn create_with_pool_id_works() { - ExtBuilder::default().build_and_execute(|| { - let ed = Balances::minimum_balance(); - - Balances::make_free_balance_be(&11, StakingMock::minimum_nominator_bond() + ed); - assert_ok!(Pools::create( - RuntimeOrigin::signed(11), - StakingMock::minimum_nominator_bond(), - 123, - 456, - 789 - )); - - assert_eq!(Balances::free_balance(&11), 0); - // delete the initial pool created, then pool_Id `1` will be free - - assert_noop!( - Pools::create_with_pool_id(RuntimeOrigin::signed(12), 20, 234, 654, 783, 1), - Error::::PoolIdInUse - ); - - assert_noop!( - Pools::create_with_pool_id(RuntimeOrigin::signed(12), 20, 234, 654, 783, 3), - Error::::InvalidPoolId - ); - - // start dismantling the pool. 
- assert_ok!(Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Destroying)); - assert_ok!(fully_unbond_permissioned(10)); - - CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 10)); - - assert_ok!(Pools::create_with_pool_id(RuntimeOrigin::signed(10), 20, 234, 654, 783, 1)); + assert_noop!(create.dispatch(Origin::signed(11)), Error::::MaxPoolMembers); }); } } @@ -4248,27 +3892,27 @@ mod nominate { ExtBuilder::default().build_and_execute(|| { // Depositor can't nominate assert_noop!( - Pools::nominate(RuntimeOrigin::signed(10), 1, vec![21]), + Pools::nominate(Origin::signed(10), 1, vec![21]), Error::::NotNominator ); // State toggler can't nominate assert_noop!( - Pools::nominate(RuntimeOrigin::signed(902), 1, vec![21]), + Pools::nominate(Origin::signed(902), 1, vec![21]), Error::::NotNominator ); // Root can nominate - assert_ok!(Pools::nominate(RuntimeOrigin::signed(900), 1, vec![21])); + assert_ok!(Pools::nominate(Origin::signed(900), 1, vec![21])); assert_eq!(Nominations::get().unwrap(), vec![21]); // Nominator can nominate - assert_ok!(Pools::nominate(RuntimeOrigin::signed(901), 1, vec![31])); + assert_ok!(Pools::nominate(Origin::signed(901), 1, vec![31])); assert_eq!(Nominations::get().unwrap(), vec![31]); // Can't nominate for a pool that doesn't exist assert_noop!( - Pools::nominate(RuntimeOrigin::signed(902), 123, vec![21]), + Pools::nominate(Origin::signed(902), 123, vec![21]), Error::::PoolNotFound ); }); @@ -4282,20 +3926,20 @@ mod set_state { fn set_state_works() { ExtBuilder::default().build_and_execute(|| { // Given - assert_ok!(BondedPool::::get(1).unwrap().ok_to_be_open()); + assert_ok!(BondedPool::::get(1).unwrap().ok_to_be_open(0)); // Only the root and state toggler can change the state when the pool is ok to be open. 
assert_noop!( - Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Blocked), + Pools::set_state(Origin::signed(10), 1, PoolState::Blocked), Error::::CanNotChangeState ); assert_noop!( - Pools::set_state(RuntimeOrigin::signed(901), 1, PoolState::Blocked), + Pools::set_state(Origin::signed(901), 1, PoolState::Blocked), Error::::CanNotChangeState ); // Root can change state - assert_ok!(Pools::set_state(RuntimeOrigin::signed(900), 1, PoolState::Blocked)); + assert_ok!(Pools::set_state(Origin::signed(900), 1, PoolState::Blocked)); assert_eq!( pool_events_since_last_call(), @@ -4309,16 +3953,16 @@ mod set_state { assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Blocked); // State toggler can change state - assert_ok!(Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(Origin::signed(902), 1, PoolState::Destroying)); assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); // If the pool is destroying, then no one can set state assert_noop!( - Pools::set_state(RuntimeOrigin::signed(900), 1, PoolState::Blocked), + Pools::set_state(Origin::signed(900), 1, PoolState::Blocked), Error::::CanNotChangeState ); assert_noop!( - Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Blocked), + Pools::set_state(Origin::signed(902), 1, PoolState::Blocked), Error::::CanNotChangeState ); @@ -4330,7 +3974,7 @@ mod set_state { bonded_pool.points = 100; bonded_pool.put(); // When - assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(Origin::signed(11), 1, PoolState::Destroying)); // Then assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); @@ -4338,15 +3982,15 @@ mod set_state { Balances::make_free_balance_be(&default_bonded_account(), Balance::max_value() / 10); unsafe_set_state(1, PoolState::Open); // When - assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); + 
assert_ok!(Pools::set_state(Origin::signed(11), 1, PoolState::Destroying)); // Then assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); - // If the pool is not ok to be open, it cannot be permissionlessly set to a state that + // If the pool is not ok to be open, it cannot be permissionleslly set to a state that // isn't destroying unsafe_set_state(1, PoolState::Open); assert_noop!( - Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Blocked), + Pools::set_state(Origin::signed(11), 1, PoolState::Blocked), Error::::CanNotChangeState ); @@ -4369,28 +4013,28 @@ mod set_metadata { fn set_metadata_works() { ExtBuilder::default().build_and_execute(|| { // Root can set metadata - assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1])); + assert_ok!(Pools::set_metadata(Origin::signed(900), 1, vec![1, 1])); assert_eq!(Metadata::::get(1), vec![1, 1]); // State toggler can set metadata - assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(902), 1, vec![2, 2])); + assert_ok!(Pools::set_metadata(Origin::signed(902), 1, vec![2, 2])); assert_eq!(Metadata::::get(1), vec![2, 2]); // Depositor can't set metadata assert_noop!( - Pools::set_metadata(RuntimeOrigin::signed(10), 1, vec![3, 3]), + Pools::set_metadata(Origin::signed(10), 1, vec![3, 3]), Error::::DoesNotHavePermission ); // Nominator can't set metadata assert_noop!( - Pools::set_metadata(RuntimeOrigin::signed(901), 1, vec![3, 3]), + Pools::set_metadata(Origin::signed(901), 1, vec![3, 3]), Error::::DoesNotHavePermission ); // Metadata cannot be longer than `MaxMetadataLen` assert_noop!( - Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1, 1]), + Pools::set_metadata(Origin::signed(900), 1, vec![1, 1, 1]), Error::::MetadataExceedsMaxLen ); }); @@ -4405,7 +4049,7 @@ mod set_configs { ExtBuilder::default().build_and_execute(|| { // Setting works assert_ok!(Pools::set_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Set(1 as Balance), ConfigOp::Set(2 as 
Balance), ConfigOp::Set(3u32), @@ -4420,7 +4064,7 @@ mod set_configs { // Noop does nothing assert_storage_noop!(assert_ok!(Pools::set_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Noop, @@ -4430,7 +4074,7 @@ mod set_configs { // Removing works assert_ok!(Pools::set_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Remove, ConfigOp::Remove, ConfigOp::Remove, @@ -4462,7 +4106,7 @@ mod bond_extra { assert_eq!(Balances::free_balance(10), 100); // when - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); // then assert_eq!(Balances::free_balance(10), 90); @@ -4479,7 +4123,7 @@ mod bond_extra { ); // when - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(20))); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(20))); // then assert_eq!(Balances::free_balance(10), 70); @@ -4508,7 +4152,7 @@ mod bond_extra { assert_eq!(Balances::free_balance(10), 35); // when - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); // then assert_eq!(Balances::free_balance(10), 35); @@ -4551,7 +4195,7 @@ mod bond_extra { assert_eq!(Balances::free_balance(20), 20); // when - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); // then assert_eq!(Balances::free_balance(10), 35); @@ -4560,7 +4204,7 @@ mod bond_extra { assert_eq!(BondedPools::::get(1).unwrap().points, 30 + 1); // when - assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::Rewards)); // then assert_eq!(Balances::free_balance(20), 20); @@ -4604,7 +4248,7 @@ mod update_roles { // non-existent pools assert_noop!( 
Pools::update_roles( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4616,7 +4260,7 @@ mod update_roles { // depositor cannot change roles. assert_noop!( Pools::update_roles( - RuntimeOrigin::signed(1), + Origin::signed(1), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4628,7 +4272,7 @@ mod update_roles { // nominator cannot change roles. assert_noop!( Pools::update_roles( - RuntimeOrigin::signed(901), + Origin::signed(901), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4639,7 +4283,7 @@ mod update_roles { // state-toggler assert_noop!( Pools::update_roles( - RuntimeOrigin::signed(902), + Origin::signed(902), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4650,7 +4294,7 @@ mod update_roles { // but root can assert_ok!(Pools::update_roles( - RuntimeOrigin::signed(900), + Origin::signed(900), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4681,7 +4325,7 @@ mod update_roles { // also root origin can assert_ok!(Pools::update_roles( - RuntimeOrigin::root(), + Origin::root(), 1, ConfigOp::Set(1), ConfigOp::Set(2), @@ -4708,7 +4352,7 @@ mod update_roles { // Noop works assert_ok!(Pools::update_roles( - RuntimeOrigin::root(), + Origin::root(), 1, ConfigOp::Set(11), ConfigOp::Noop, @@ -4736,7 +4380,7 @@ mod update_roles { // Remove works assert_ok!(Pools::update_roles( - RuntimeOrigin::root(), + Origin::root(), 1, ConfigOp::Set(69), ConfigOp::Remove, @@ -4812,7 +4456,7 @@ mod reward_counter_precision { // tad bit less. cannot be paid out. assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += expected_smallest_reward - 1)); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); assert_eq!(pool_events_since_last_call(), vec![]); // revert it. @@ -4822,7 +4466,7 @@ mod reward_counter_precision { // tad bit more. can be claimed. 
assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += expected_smallest_reward + 1)); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); assert_eq!( pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 1173 }] @@ -4830,41 +4474,6 @@ mod reward_counter_precision { }) } - #[test] - fn massive_reward_in_small_pool() { - let tiny_bond = 1000 * DOT; - ExtBuilder::default().ed(DOT).min_bond(tiny_bond).build_and_execute(|| { - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::Created { depositor: 10, pool_id: 1 }, - Event::Bonded { member: 10, pool_id: 1, bonded: 10000000000000, joined: true } - ] - ); - - Balances::make_free_balance_be(&20, tiny_bond); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), tiny_bond / 2, 1)); - - // Suddenly, add a shit ton of rewards. - assert_ok!( - Balances::mutate_account(&default_reward_account(), |a| a.free += inflation(1)) - ); - - // now claim. - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); - - assert_eq!( - pool_events_since_last_call(), - vec![ - Event::Bonded { member: 20, pool_id: 1, bonded: 5000000000000, joined: true }, - Event::PaidOut { member: 10, pool_id: 1, payout: 7333333333333333333 }, - Event::PaidOut { member: 20, pool_id: 1, payout: 3666666666666666666 } - ] - ); - }) - } - #[test] fn reward_counter_calc_wont_fail_in_normal_polkadot_future() { // create a pool that has roughly half of the polkadot issuance in 10 years. @@ -4893,7 +4502,7 @@ mod reward_counter_precision { // some whale now joins with the other half ot the total issuance. This will bloat all // the calculation regarding current reward counter. 
Balances::make_free_balance_be(&20, pool_bond * 2); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), pool_bond, 1)); + assert_ok!(Pools::join(Origin::signed(20), pool_bond, 1)); assert_eq!( pool_events_since_last_call(), @@ -4905,8 +4514,8 @@ mod reward_counter_precision { }] ); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -4915,12 +4524,12 @@ mod reward_counter_precision { // now let a small member join with 10 DOTs. Balances::make_free_balance_be(&30, 20 * DOT); - assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10 * DOT, 1)); + assert_ok!(Pools::join(Origin::signed(30), 10 * DOT, 1)); // and give a reasonably small reward to the pool. assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += DOT)); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); + assert_ok!(Pools::claim_payout(Origin::signed(30))); assert_eq!( pool_events_since_last_call(), vec![ @@ -4958,10 +4567,7 @@ mod reward_counter_precision { // set to zero. In other tests that we want to assert a scenario won't fail, we should // also set the reward counters to some large value. Balances::make_free_balance_be(&20, pool_bond * 2); - assert_err!( - Pools::join(RuntimeOrigin::signed(20), pool_bond, 1), - Error::::OverflowRisk - ); + assert_err!(Pools::join(Origin::signed(20), pool_bond, 1), Error::::OverflowRisk); }) } @@ -4987,7 +4593,7 @@ mod reward_counter_precision { // and have a tiny fish join the pool as well.. 
Balances::make_free_balance_be(&20, 20 * DOT); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10 * DOT, 1)); + assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1)); // earn some small rewards assert_ok!( @@ -4996,7 +4602,7 @@ mod reward_counter_precision { // no point in claiming for 20 (nonetheless, it should be harmless) assert!(pending_rewards(20).unwrap().is_zero()); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); assert_eq!( pool_events_since_last_call(), vec![ @@ -5016,7 +4622,7 @@ mod reward_counter_precision { Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) ); assert!(pending_rewards(20).unwrap().is_zero()); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); assert_eq!( pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10000000 }] @@ -5027,8 +4633,8 @@ mod reward_counter_precision { Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) ); assert_eq!(pending_rewards(20).unwrap(), 1); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), vec![ @@ -5061,7 +4667,7 @@ mod reward_counter_precision { // and have a tiny fish join the pool as well.. Balances::make_free_balance_be(&20, 20 * DOT); - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10 * DOT, 1)); + assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1)); // earn some small rewards assert_ok!( @@ -5071,8 +4677,8 @@ mod reward_counter_precision { // if 20 claims now, their reward counter should stay the same, so that they have a // chance of claiming this if they let it accumulate. 
Also see // `if_small_member_waits_long_enough_they_will_earn_rewards` - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(Origin::signed(20))); assert_eq!( pool_events_since_last_call(), vec![ diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs index 1062b1749d417..a9003ffd3fb4c 100644 --- a/frame/nomination-pools/src/weights.rs +++ b/frame/nomination-pools/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_nomination_pools //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-06-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet // --chain=dev @@ -35,7 +35,6 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/nomination-pools/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,120 +69,111 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - // Minimum execution time: 159_948 nanoseconds. 
- Weight::from_ref_time(161_133_000 as u64) - .saturating_add(T::DbWeight::get().reads(17 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) + (123_947_000 as Weight) + .saturating_add(T::DbWeight::get().reads(17 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:2) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - // Minimum execution time: 155_517 nanoseconds. - Weight::from_ref_time(159_101_000 as u64) - .saturating_add(T::DbWeight::get().reads(14 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) + (118_236_000 as Weight) + .saturating_add(T::DbWeight::get().reads(14 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:3) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - // Minimum execution time: 172_788 nanoseconds. 
- Weight::from_ref_time(174_212_000 as u64) - .saturating_add(T::DbWeight::get().reads(14 as u64)) - .saturating_add(T::DbWeight::get().writes(13 as u64)) + (132_475_000 as Weight) + .saturating_add(T::DbWeight::get().reads(14 as Weight)) + .saturating_add(T::DbWeight::get().writes(13 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - // Minimum execution time: 64_560 nanoseconds. - Weight::from_ref_time(64_950_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (50_299_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListBags (r:2 w:2) // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - // Minimum execution time: 161_398 nanoseconds. 
- Weight::from_ref_time(162_991_000 as u64) - .saturating_add(T::DbWeight::get().reads(18 as u64)) - .saturating_add(T::DbWeight::get().writes(13 as u64)) + (121_254_000 as Weight) + .saturating_add(T::DbWeight::get().reads(18 as Weight)) + .saturating_add(T::DbWeight::get().writes(13 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - // Minimum execution time: 66_036 nanoseconds. - Weight::from_ref_time(67_183_304 as u64) - // Standard Error: 565 - .saturating_add(Weight::from_ref_time(57_830 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (41_928_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 111_156 nanoseconds. 
- Weight::from_ref_time(112_507_059 as u64) - // Standard Error: 655 - .saturating_add(Weight::from_ref_time(53_711 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(9 as u64)) - .saturating_add(T::DbWeight::get().writes(7 as u64)) + (81_611_000 as Weight) + // Standard Error: 1_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(7 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) - // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) @@ -195,32 +185,29 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) - // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(s: u32, ) -> Weight { - // Minimum execution time: 168_270 nanoseconds. 
- Weight::from_ref_time(170_059_380 as u64) - // Standard Error: 1_506 - .saturating_add(Weight::from_ref_time(1_258 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(20 as u64)) - .saturating_add(T::DbWeight::get().writes(17 as u64)) + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + (139_849_000 as Weight) + .saturating_add(T::DbWeight::get().reads(19 as Weight)) + .saturating_add(T::DbWeight::get().writes(16 as Weight)) } - // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools MaxPools (r:1 w:0) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: NominationPools PoolMembers (r:1 w:1) + // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) // Storage: System Account (r:2 w:2) - // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) @@ -229,53 +216,48 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 146_153 nanoseconds. 
- Weight::from_ref_time(146_955_000 as u64) - .saturating_add(T::DbWeight::get().reads(21 as u64)) - .saturating_add(T::DbWeight::get().writes(15 as u64)) + (126_246_000 as Weight) + .saturating_add(T::DbWeight::get().reads(22 as Weight)) + .saturating_add(T::DbWeight::get().writes(15 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking MaxNominatorsCount (r:1 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 71_380 nanoseconds. 
- Weight::from_ref_time(71_060_388 as u64) - // Standard Error: 2_587 - .saturating_add(Weight::from_ref_time(1_185_729 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (48_829_000 as Weight) + // Standard Error: 10_000 + .saturating_add((2_204_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(12 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - // Minimum execution time: 46_275 nanoseconds. - Weight::from_ref_time(46_689_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (26_761_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - // Minimum execution time: 19_246 nanoseconds. 
- Weight::from_ref_time(20_415_018 as u64) - // Standard Error: 95 - .saturating_add(Weight::from_ref_time(2_040 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (14_519_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -283,31 +265,27 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - // Minimum execution time: 9_231 nanoseconds. - Weight::from_ref_time(9_526_000 as u64) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (6_173_000 as Weight) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - // Minimum execution time: 31_246 nanoseconds. 
- Weight::from_ref_time(31_762_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (22_261_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 73_812 nanoseconds. - Weight::from_ref_time(74_790_000 as u64) - .saturating_add(T::DbWeight::get().reads(9 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (47_959_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } } @@ -316,120 +294,111 @@ impl WeightInfo for () { // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn join() -> 
Weight { - // Minimum execution time: 159_948 nanoseconds. - Weight::from_ref_time(161_133_000 as u64) - .saturating_add(RocksDbWeight::get().reads(17 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) + (123_947_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(17 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:2) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - // Minimum execution time: 155_517 nanoseconds. - Weight::from_ref_time(159_101_000 as u64) - .saturating_add(RocksDbWeight::get().reads(14 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) + (118_236_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(14 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:3) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - // Minimum execution time: 172_788 nanoseconds. 
- Weight::from_ref_time(174_212_000 as u64) - .saturating_add(RocksDbWeight::get().reads(14 as u64)) - .saturating_add(RocksDbWeight::get().writes(13 as u64)) + (132_475_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(14 as Weight)) + .saturating_add(RocksDbWeight::get().writes(13 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - // Minimum execution time: 64_560 nanoseconds. - Weight::from_ref_time(64_950_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (50_299_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListBags (r:2 w:2) // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - // Minimum execution time: 161_398 nanoseconds. 
- Weight::from_ref_time(162_991_000 as u64) - .saturating_add(RocksDbWeight::get().reads(18 as u64)) - .saturating_add(RocksDbWeight::get().writes(13 as u64)) + (121_254_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(18 as Weight)) + .saturating_add(RocksDbWeight::get().writes(13 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - // Minimum execution time: 66_036 nanoseconds. - Weight::from_ref_time(67_183_304 as u64) - // Standard Error: 565 - .saturating_add(Weight::from_ref_time(57_830 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (41_928_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 111_156 nanoseconds. 
- Weight::from_ref_time(112_507_059 as u64) - // Standard Error: 655 - .saturating_add(Weight::from_ref_time(53_711 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(9 as u64)) - .saturating_add(RocksDbWeight::get().writes(7 as u64)) + (81_611_000 as Weight) + // Standard Error: 1_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(7 as Weight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) - // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) @@ -441,32 +410,29 @@ impl WeightInfo for () { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) - // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(s: u32, ) -> Weight { - // Minimum execution time: 168_270 nanoseconds. 
- Weight::from_ref_time(170_059_380 as u64) - // Standard Error: 1_506 - .saturating_add(Weight::from_ref_time(1_258 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(20 as u64)) - .saturating_add(RocksDbWeight::get().writes(17 as u64)) + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + (139_849_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(19 as Weight)) + .saturating_add(RocksDbWeight::get().writes(16 as Weight)) } - // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools MaxPools (r:1 w:0) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: NominationPools PoolMembers (r:1 w:1) + // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) // Storage: System Account (r:2 w:2) - // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) @@ -475,53 +441,48 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 146_153 nanoseconds. 
- Weight::from_ref_time(146_955_000 as u64) - .saturating_add(RocksDbWeight::get().reads(21 as u64)) - .saturating_add(RocksDbWeight::get().writes(15 as u64)) + (126_246_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(22 as Weight)) + .saturating_add(RocksDbWeight::get().writes(15 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking MaxNominatorsCount (r:1 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 71_380 nanoseconds. 
- Weight::from_ref_time(71_060_388 as u64) - // Standard Error: 2_587 - .saturating_add(Weight::from_ref_time(1_185_729 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (48_829_000 as Weight) + // Standard Error: 10_000 + .saturating_add((2_204_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(12 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - // Minimum execution time: 46_275 nanoseconds. - Weight::from_ref_time(46_689_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (26_761_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - // Minimum execution time: 19_246 nanoseconds. 
- Weight::from_ref_time(20_415_018 as u64) - // Standard Error: 95 - .saturating_add(Weight::from_ref_time(2_040 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (14_519_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -529,30 +490,26 @@ impl WeightInfo for () { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - // Minimum execution time: 9_231 nanoseconds. - Weight::from_ref_time(9_526_000 as u64) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (6_173_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - // Minimum execution time: 31_246 nanoseconds. 
- Weight::from_ref_time(31_762_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (22_261_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: NominationPools BondedPools (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 73_812 nanoseconds. - Weight::from_ref_time(74_790_000 as u64) - .saturating_add(RocksDbWeight::get().reads(9 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (47_959_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } } diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 00e0e40ce33b0..5a7cd494362ca 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -35,16 +35,13 @@ fn pool_lifecycle_e2e() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. - assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); + assert_ok!(Pools::create(Origin::signed(10), 50, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); // have the pool nominate. 
- assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); + assert_ok!(Pools::nominate(Origin::signed(10), 1, vec![1, 2, 3])); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 50),]); assert_eq!( pool_events_since_last_call(), vec![ @@ -54,15 +51,12 @@ fn pool_lifecycle_e2e() { ); // have two members join - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); - assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1)); + assert_ok!(Pools::join(Origin::signed(20), 10, 1)); + assert_ok!(Pools::join(Origin::signed(21), 10, 1)); assert_eq!( staking_events_since_last_call(), - vec![ - StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, - StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, - ] + vec![StakingEvent::Bonded(POOL1_BONDED, 10), StakingEvent::Bonded(POOL1_BONDED, 10),] ); assert_eq!( pool_events_since_last_call(), @@ -73,17 +67,17 @@ fn pool_lifecycle_e2e() { ); // pool goes into destroying - assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(Origin::signed(10), 1, PoolState::Destroying)); // depositor cannot unbond yet. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 50), + Pools::unbond(Origin::signed(10), 10, 50), PoolsError::::MinimumBondNotMet, ); // now the members want to unbond. 
- assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(Origin::signed(21), 21, 10)); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_eras.len(), 1); assert_eq!(PoolMembers::::get(20).unwrap().points, 0); @@ -93,8 +87,8 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded(POOL1_BONDED, 10), ] ); assert_eq!( @@ -108,14 +102,14 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 50), + Pools::unbond(Origin::signed(10), 10, 50), PoolsError::::MinimumBondNotMet, ); for e in 1..BondingDuration::get() { CurrentEra::::set(Some(e)); assert_noop!( - Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0), + Pools::withdraw_unbonded(Origin::signed(20), 20, 0), PoolsError::::CannotWithdrawAny ); } @@ -125,19 +119,19 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 50), + Pools::unbond(Origin::signed(10), 10, 50), PoolsError::::MinimumBondNotMet, ); // but members can now withdraw. 
- assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(21), 21, 0)); assert!(PoolMembers::::get(20).is_none()); assert!(PoolMembers::::get(21).is_none()); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },] + vec![StakingEvent::Withdrawn(POOL1_BONDED, 20),] ); assert_eq!( pool_events_since_last_call(), @@ -152,19 +146,16 @@ fn pool_lifecycle_e2e() { // as soon as all members have left, the depositor can try to unbond, but since the // min-nominator intention is set, they must chill first. assert_noop!( - Pools::unbond(RuntimeOrigin::signed(10), 10, 50), + Pools::unbond(Origin::signed(10), 10, 50), pallet_staking::Error::::InsufficientBond ); - assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50)); + assert_ok!(Pools::chill(Origin::signed(10), 1)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 50)); assert_eq!( staking_events_since_last_call(), - vec![ - StakingEvent::Chilled { stash: POOL1_BONDED }, - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 }, - ] + vec![StakingEvent::Chilled(POOL1_BONDED), StakingEvent::Unbonded(POOL1_BONDED, 50),] ); assert_eq!( pool_events_since_last_call(), @@ -173,12 +164,12 @@ fn pool_lifecycle_e2e() { // waiting another bonding duration: CurrentEra::::set(Some(BondingDuration::get() * 2)); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 1)); // pools is fully destroyed now. 
assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },] + vec![StakingEvent::Withdrawn(POOL1_BONDED, 50),] ); assert_eq!( pool_events_since_last_call(), @@ -199,13 +190,10 @@ fn pool_slash_e2e() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. - assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); + assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); assert_eq!( pool_events_since_last_call(), vec![ @@ -217,15 +205,12 @@ fn pool_slash_e2e() { assert_eq!(Payee::::get(POOL1_BONDED), RewardDestination::Account(POOL1_REWARD)); // have two members join - assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); - assert_ok!(Pools::join(RuntimeOrigin::signed(21), 20, 1)); + assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + assert_ok!(Pools::join(Origin::signed(21), 20, 1)); assert_eq!( staking_events_since_last_call(), - vec![ - StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }, - StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 } - ] + vec![StakingEvent::Bonded(POOL1_BONDED, 20), StakingEvent::Bonded(POOL1_BONDED, 20)] ); assert_eq!( pool_events_since_last_call(), @@ -239,14 +224,14 @@ fn pool_slash_e2e() { CurrentEra::::set(Some(1)); // 20 / 80 of the total funds are unlocked, and safe from any further slash. 
- assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 } + StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded(POOL1_BONDED, 10) ] ); assert_eq!( @@ -261,16 +246,16 @@ fn pool_slash_e2e() { // note: depositor cannot fully unbond at this point. // these funds will still get slashed. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(Origin::signed(21), 21, 10)); assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded(POOL1_BONDED, 10), ] ); @@ -293,10 +278,7 @@ fn pool_slash_e2e() { 2, // slash era 2, affects chunks at era 5 onwards. 
); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); assert_eq!( pool_events_since_last_call(), vec![ @@ -308,7 +290,7 @@ fn pool_slash_e2e() { ); CurrentEra::::set(Some(3)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); + assert_ok!(Pools::unbond(Origin::signed(21), 21, 10)); assert_eq!( PoolMembers::::get(21).unwrap(), @@ -320,10 +302,7 @@ fn pool_slash_e2e() { unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5) } ); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, 5)]); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }] @@ -331,8 +310,8 @@ fn pool_slash_e2e() { // now we start withdrawing. we do it all at once, at era 6 where 20 and 21 are fully free. CurrentEra::::set(Some(6)); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(21), 21, 0)); assert_eq!( pool_events_since_last_call(), @@ -348,16 +327,16 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), // a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked - vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }] + vec![StakingEvent::Withdrawn(POOL1_BONDED, 15 + 10 + 15)] ); // now, finally, we can unbond the depositor further than their current limit. 
- assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20)); + assert_ok!(Pools::set_state(Origin::signed(10), 1, PoolState::Destroying)); + assert_ok!(Pools::unbond(Origin::signed(10), 10, 20)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }] + vec![StakingEvent::Unbonded(POOL1_BONDED, 10)] ); assert_eq!( pool_events_since_last_call(), @@ -378,11 +357,11 @@ fn pool_slash_e2e() { } ); // withdraw the depositor, they should lose 12 balance in total due to slash. - assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }] + vec![StakingEvent::Withdrawn(POOL1_BONDED, 10)] ); assert_eq!( pool_events_since_last_call(), @@ -406,13 +385,10 @@ fn pool_slash_proportional() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
- assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); + assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); assert_eq!( pool_events_since_last_call(), vec![ @@ -423,16 +399,16 @@ fn pool_slash_proportional() { // have two members join let bond = 20; - assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); - assert_ok!(Pools::join(RuntimeOrigin::signed(21), bond, 1)); - assert_ok!(Pools::join(RuntimeOrigin::signed(22), bond, 1)); + assert_ok!(Pools::join(Origin::signed(20), bond, 1)); + assert_ok!(Pools::join(Origin::signed(21), bond, 1)); + assert_ok!(Pools::join(Origin::signed(22), bond, 1)); assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, - StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, - StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded(POOL1_BONDED, bond), + StakingEvent::Bonded(POOL1_BONDED, bond), + StakingEvent::Bonded(POOL1_BONDED, bond), ] ); assert_eq!( @@ -448,11 +424,11 @@ fn pool_slash_proportional() { CurrentEra::::set(Some(99)); // and unbond - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] + vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] ); assert_eq!( pool_events_since_last_call(), @@ -466,10 +442,10 @@ fn pool_slash_proportional() { ); CurrentEra::::set(Some(100)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond)); + assert_ok!(Pools::unbond(Origin::signed(21), 21, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded { 
stash: POOL1_BONDED, amount: bond },] + vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] ); assert_eq!( pool_events_since_last_call(), @@ -483,10 +459,10 @@ fn pool_slash_proportional() { ); CurrentEra::::set(Some(101)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond)); + assert_ok!(Pools::unbond(Origin::signed(22), 22, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] + vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] ); assert_eq!( pool_events_since_last_call(), @@ -510,17 +486,16 @@ fn pool_slash_proportional() { 100, ); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); assert_eq!( pool_events_since_last_call(), vec![ - // This era got slashed 12.5, which rounded up to 13. - PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 7 }, - // This era got slashed 12 instead of 12.5 because an earlier chunk got 0.5 more - // slashed, and 12 is all the remaining slash + // This last pool got slashed only the leftover dust. Otherwise in principle, this + // chunk/pool should have not been affected. + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 19 }, + // This pool got slashed 12.5, which rounded down to 12. + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 8 }, + // This pool got slashed 12.5, which rounded down to 12. PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 }, // Bonded pool got slashed for 25, remaining 15 in it. PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 } @@ -543,11 +518,8 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
- assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] - ); + assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); assert_eq!( pool_events_since_last_call(), vec![ @@ -558,10 +530,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { // have two members join let bond = 20; - assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); + assert_ok!(Pools::join(Origin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] + vec![StakingEvent::Bonded(POOL1_BONDED, bond)] ); assert_eq!( pool_events_since_last_call(), @@ -570,10 +542,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { // progress and unbond. CurrentEra::::set(Some(99)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] + vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] ); assert_eq!( pool_events_since_last_call(), @@ -597,10 +569,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { 100, ); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }] @@ -622,11 +591,8 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
- assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] - ); + assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); assert_eq!( pool_events_since_last_call(), vec![ @@ -637,10 +603,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // have two members join let bond = 20; - assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); + assert_ok!(Pools::join(Origin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] + vec![StakingEvent::Bonded(POOL1_BONDED, bond)] ); assert_eq!( pool_events_since_last_call(), @@ -649,10 +615,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // progress and unbond. CurrentEra::::set(Some(99)); - assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); + assert_ok!(Pools::unbond(Origin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] + vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] ); assert_eq!( pool_events_since_last_call(), @@ -676,10 +642,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { 100, ); - assert_eq!( - staking_events_since_last_call(), - vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] - ); + assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); assert_eq!( pool_events_since_last_call(), vec![ diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 568dec7b3a340..055ba7b4b3c06 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -43,16 +43,16 @@ impl frame_system::Config for Runtime { type 
BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -81,7 +81,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -111,7 +111,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -124,12 +124,10 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking)>; type GenesisElectionProvider = Self::ElectionProvider; - type VoterList = VoterList; - type TargetList = pallet_staking::UseValidatorsMap; + type VoterList = pallet_bags_list::Pallet; type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type OnStakerSlash = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); @@ -139,9 +137,8 @@ parameter_types! 
{ pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; } -type VoterBagsListInstance = pallet_bags_list::Instance1; -impl pallet_bags_list::Config for Runtime { - type RuntimeEvent = RuntimeEvent; +impl pallet_bags_list::Config for Runtime { + type Event = Event; type WeightInfo = (); type BagThresholds = BagThresholds; type ScoreProvider = Staking; @@ -168,13 +165,14 @@ parameter_types! { } impl pallet_nomination_pools::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); type Currency = Balances; + type CurrencyBalance = Balance; type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakingInterface = Staking; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; @@ -195,7 +193,7 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, - VoterList: pallet_bags_list::::{Pallet, Call, Storage, Event}, + BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, Pools: pallet_nomination_pools::{Pallet, Call, Storage, Event}, } ); @@ -227,7 +225,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // set some limit for nominations. 
assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), pallet_staking::ConfigOp::Set(10), // minimum nominator bond pallet_staking::ConfigOp::Noop, pallet_staking::ConfigOp::Noop, @@ -250,7 +248,7 @@ pub(crate) fn pool_events_since_last_call() -> Vec>(); let already_seen = ObservedEventsPools::get(); ObservedEventsPools::set(events.len()); @@ -261,7 +259,7 @@ pub(crate) fn staking_events_since_last_call() -> Vec>(); let already_seen = ObservedEventsStaking::get(); ObservedEventsStaking::set(events.len()); diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 3c7a43068af82..2b8e461b84192 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -23,9 +23,13 @@ pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../b pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../grandpa" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../im-online" } -pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../offences" } +pallet-offences = { version = "4.0.0-dev", default-features = false, features = [ + "runtime-benchmarks", +], path = "../../offences" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } -pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } +pallet-staking = { version = "4.0.0-dev", default-features = false, features = [ + "runtime-benchmarks", +], path = "../../staking" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } 
@@ -56,7 +60,3 @@ std = [ "sp-staking/std", "sp-std/std", ] - -runtime-benchmarks = [ - "pallet-staking/runtime-benchmarks", -] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 555ec42882ee1..98c6390964d82 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -17,7 +17,6 @@ //! Offences pallet benchmarking. -#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] mod mock; @@ -217,7 +216,7 @@ fn make_offenders_im_online( } #[cfg(test)] -fn check_events::RuntimeEvent>>(expected: I) { +fn check_events::Event>>(expected: I) { let events = System::::events() .into_iter() .map(|frame_system::EventRecord { event, .. }| event) @@ -289,13 +288,15 @@ benchmarks! { let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; let keys = ImOnline::::keys(); let validator_set_count = keys.len() as u32; - let offenders_count = offenders.len() as u32; + + let slash_fraction = UnresponsivenessOffence::::slash_fraction( + offenders.len() as u32, validator_set_count, + ); let offence = UnresponsivenessOffence { session_index: 0, validator_set_count, offenders, }; - let slash_fraction = offence.slash_fraction(offenders_count); assert_eq!(System::::event_count(), 0); }: { let _ = ::ReportUnresponsiveness::report_offence( @@ -306,19 +307,19 @@ benchmarks! 
{ verify { let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); let slash_amount = slash_fraction * bond_amount; - let reward_amount = slash_amount.saturating_mul(1 + n) / 2; + let reward_amount = slash_amount * (1 + n) / 2; let reward = reward_amount / r; let slash = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Slashed{staker: id, amount: BalanceOf::::from(slash_amount)}) + ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) ); let balance_slash = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) + ::Event::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) ); let chill = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Chilled{stash: id}) + ::Event::from(StakingEvent::::Chilled(id)) ); let balance_deposit = |id, amount: u32| - ::RuntimeEvent::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); + ::Event::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); let mut first = true; let slash_events = raw_offenders.into_iter() .flat_map(|offender| { @@ -340,7 +341,7 @@ benchmarks! { .flat_map(|reporter| vec![ balance_deposit(reporter.clone(), reward).into(), frame_system::Event::::NewAccount { account: reporter.clone() }.into(), - ::RuntimeEvent::from( + ::Event::from( pallet_balances::Event::::Endowed{account: reporter, free_balance: reward.into()} ).into(), ]) @@ -367,7 +368,7 @@ benchmarks! 
{ check_events::( std::iter::empty() .chain(slash_events.into_iter().map(Into::into)) - .chain(std::iter::once(::RuntimeEvent::from( + .chain(std::iter::once(::Event::from( pallet_offences::Event::Offence{ kind: UnresponsivenessOffence::::ID, timeslot: 0_u32.to_le_bytes().to_vec(), diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index e022d81c5b5bd..d51a81b1212c0 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -40,9 +40,7 @@ type Balance = u64; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - 2u64 * WEIGHT_PER_SECOND - ); + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } impl frame_system::Config for Test { @@ -50,16 +48,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -77,7 +75,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; @@ -128,7 +126,7 @@ impl pallet_session::Config for Test { type ShouldEndSession = pallet_session::PeriodicSessions; type NextSessionRotation = pallet_session::PeriodicSessions; type SessionHandler = TestSessionHandler; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type 
ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type WeightInfo = (); @@ -148,7 +146,7 @@ parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::testing::TestXt; pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { @@ -156,9 +154,6 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { @@ -168,7 +163,7 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -180,12 +175,10 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); - type ElectionProvider = onchain::OnChainExecution; + type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); @@ -193,7 +186,7 @@ impl pallet_staking::Config for Test { impl pallet_im_online::Config for Test { type AuthorityId = UintAuthorityId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorSet = Historical; type NextSessionRotation = pallet_session::PeriodicSessions; type 
ReportUnresponsiveness = Offences; @@ -205,23 +198,23 @@ impl pallet_im_online::Config for Test { } impl pallet_offences::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; } impl frame_system::offchain::SendTransactionTypes for Test where - RuntimeCall: From, + Call: From, { type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type OverarchingCall = Call; } impl crate::Config for Test {} pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test where diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 7858b02719c4c..e4b75d9c3c015 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -57,7 +57,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From + IsType<::RuntimeEvent>; + type Event: From + IsType<::Event>; /// Full identification of the validator. type IdentificationTuple: Parameter; /// A handler called for every offence report. @@ -120,6 +120,7 @@ where fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { let offenders = offence.offenders(); let time_slot = offence.time_slot(); + let validator_set_count = offence.validator_set_count(); // Go through all offenders in the offence report and find all offenders that were spotted // in unique reports. 
@@ -133,7 +134,7 @@ where let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed - let new_fraction = offence.slash_fraction(offenders_count); + let new_fraction = O::slash_fraction(offenders_count, validator_set_count); let slash_perbill: Vec<_> = (0..concurrent_offenders.len()).map(|_| new_fraction).collect(); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 31dac8d51d3b1..6a69b54b3cca0 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -40,12 +40,13 @@ use sp_staking::{ offence::{self, DisableStrategy, Kind, OffenceDetails}, SessionIndex, }; +use std::cell::RefCell; pub struct OnOffenceHandler; -parameter_types! { - pub static OnOffencePerbill: Vec = Default::default(); - pub static OffenceWeight: Weight = Default::default(); +thread_local! { + pub static ON_OFFENCE_PERBILL: RefCell> = RefCell::new(Default::default()); + pub static OFFENCE_WEIGHT: RefCell = RefCell::new(Default::default()); } impl offence::OnOffenceHandler @@ -57,16 +58,16 @@ impl offence::OnOffenceHandler _offence_session: SessionIndex, _disable_strategy: DisableStrategy, ) -> Weight { - OnOffencePerbill::mutate(|f| { - *f = slash_fraction.to_vec(); + ON_OFFENCE_PERBILL.with(|f| { + *f.borrow_mut() = slash_fraction.to_vec(); }); - OffenceWeight::get() + OFFENCE_WEIGHT.with(|w| *w.borrow()) } } pub fn with_on_offence_fractions) -> R>(f: F) -> R { - OnOffencePerbill::mutate(|fractions| f(fractions)) + ON_OFFENCE_PERBILL.with(|fractions| f(&mut fractions.borrow_mut())) } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -85,23 +86,23 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -115,7 +116,7 @@ impl frame_system::Config for Runtime { } impl Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; } @@ -167,8 +168,8 @@ impl offence::Offence for Offence { 1 } - fn slash_fraction(&self, offenders_count: u32) -> Perbill { - Perbill::from_percent(5 + offenders_count * 100 / self.validator_set_count) + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) } } diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 266e05debf050..49bd2fb5a6923 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::{ - new_test_ext, offence_reports, report_id, with_on_offence_fractions, Offence, Offences, - RuntimeEvent, System, KIND, + new_test_ext, offence_reports, report_id, with_on_offence_fractions, Event, Offence, Offences, + System, KIND, }; use frame_system::{EventRecord, Phase}; use sp_runtime::Perbill; @@ -114,7 +114,7 @@ fn should_deposit_event() { System::events(), 
vec![EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Offences(crate::Event::Offence { + event: Event::Offences(crate::Event::Offence { kind: KIND, timeslot: time_slot.encode() }), @@ -148,7 +148,7 @@ fn doesnt_deposit_event_for_dups() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Offences(crate::Event::Offence { + event: Event::Offences(crate::Event::Offence { kind: KIND, timeslot: time_slot.encode() }), diff --git a/frame/preimage/Cargo.toml b/frame/preimage/Cargo.toml index 77046f4fb58b6..325e906c61a3c 100644 --- a/frame/preimage/Cargo.toml +++ b/frame/preimage/Cargo.toml @@ -19,7 +19,6 @@ sp-core = { version = "6.0.0", default-features = false, optional = true, path = sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } -log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -34,16 +33,13 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", - "log/std", "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", ] -try-runtime = [ - "frame-support/try-runtime", -] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/preimage/src/benchmarking.rs b/frame/preimage/src/benchmarking.rs index 8a61d7d780bfd..e0d7e9614abbc 100644 --- a/frame/preimage/src/benchmarking.rs +++ b/frame/preimage/src/benchmarking.rs @@ -35,7 +35,7 @@ fn funded_account(name: &'static str, index: u32) -> T::AccountId { } fn preimage_and_hash() -> (Vec, T::Hash) { - sized_preimage_and_hash::(MAX_SIZE) + sized_preimage_and_hash::(T::MaxSize::get()) } fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { @@ 
-48,7 +48,7 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { benchmarks! { // Expensive note - will reserve. note_preimage { - let s in 0 .. MAX_SIZE; + let s in 0 .. T::MaxSize::get(); let caller = funded_account::("caller", 0); whitelist_account!(caller); let (preimage, hash) = sized_preimage_and_hash::(s); @@ -58,7 +58,7 @@ benchmarks! { } // Cheap note - will not reserve since it was requested. note_requested_preimage { - let s in 0 .. MAX_SIZE; + let s in 0 .. T::MaxSize::get(); let caller = funded_account::("caller", 0); whitelist_account!(caller); let (preimage, hash) = sized_preimage_and_hash::(s); @@ -69,10 +69,10 @@ benchmarks! { } // Cheap note - will not reserve since it's the manager. note_no_deposit_preimage { - let s in 0 .. MAX_SIZE; + let s in 0 .. T::MaxSize::get(); let (preimage, hash) = sized_preimage_and_hash::(s); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: note_preimage(T::ManagerOrigin::successful_origin(), preimage) + }: note_preimage(T::ManagerOrigin::successful_origin(), preimage) verify { assert!(Preimage::::have_preimage(&hash)); } @@ -91,7 +91,7 @@ benchmarks! { unnote_no_deposit_preimage { let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); - }: unnote_preimage(T::ManagerOrigin::successful_origin(), hash) + }: unnote_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert!(!Preimage::::have_preimage(&hash)); } @@ -101,38 +101,33 @@ benchmarks! 
{ let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::("noter", 0); whitelist_account!(noter); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _(T::ManagerOrigin::successful_origin(), hash) + assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter).into(), preimage)); + }: _(T::ManagerOrigin::successful_origin(), hash) verify { - let deposit = T::BaseDeposit::get() + T::ByteDeposit::get() * MAX_SIZE.into(); - let s = RequestStatus::Requested { deposit: Some((noter, deposit)), count: 1, len: Some(MAX_SIZE) }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } // Cheap request - would unreserve the deposit but none was held. request_no_deposit_preimage { let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); - }: request_preimage(T::ManagerOrigin::successful_origin(), hash) + }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - let s = RequestStatus::Requested { deposit: None, count: 2, len: Some(MAX_SIZE) }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } // Cheap request - the preimage is not yet noted, so deposit to unreserve. request_unnoted_preimage { let (_, hash) = preimage_and_hash::(); - }: request_preimage(T::ManagerOrigin::successful_origin(), hash) + }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } // Cheap request - the preimage is already requested, so just a counter bump. 
request_requested_preimage { let (_, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: request_preimage(T::ManagerOrigin::successful_origin(), hash) + }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - let s = RequestStatus::Requested { deposit: None, count: 2, len: None }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(2))); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. @@ -140,7 +135,7 @@ benchmarks! { let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); - }: _(T::ManagerOrigin::successful_origin(), hash) + }: _(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), None); } @@ -148,7 +143,7 @@ benchmarks! { unrequest_unnoted_preimage { let (_, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) + }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), None); } @@ -157,10 +152,9 @@ benchmarks! 
{ let (_, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) + }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; - assert_eq!(StatusFor::::get(&hash), Some(s)); + assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 6549832c11f5d..09f6ecd52f9ad 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -30,7 +30,6 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] @@ -38,18 +37,15 @@ mod tests; pub mod weights; use sp_runtime::traits::{BadOrigin, Hash, Saturating}; -use sp_std::{borrow::Cow, prelude::*}; +use sp_std::prelude::*; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::Pays, ensure, pallet_prelude::Get, - traits::{ - Currency, Defensive, FetchResult, Hash as PreimageHash, PreimageProvider, - PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, - }, - BoundedSlice, BoundedVec, + traits::{Currency, PreimageProvider, PreimageRecipient, ReservableCurrency}, + weights::Pays, + BoundedVec, }; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -63,31 +59,24 @@ pub use pallet::*; #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] pub enum RequestStatus { /// The associated preimage has not yet been requested by the system. The given deposit (if - /// some) is being held until either it becomes requested or the user retracts the preimage. 
- Unrequested { deposit: (AccountId, Balance), len: u32 }, + /// some) is being held until either it becomes requested or the user retracts the primage. + Unrequested(Option<(AccountId, Balance)>), /// There are a non-zero number of outstanding requests for this hash by this chain. If there - /// is a preimage registered, then `len` is `Some` and it may be removed iff this counter - /// becomes zero. - Requested { deposit: Option<(AccountId, Balance)>, count: u32, len: Option }, + /// is a preimage registered, then it may be removed iff this counter becomes zero. + Requested(u32), } type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -/// Maximum size of preimage we can store is 4mb. -const MAX_SIZE: u32 = 4 * 1024 * 1024; - #[frame_support::pallet] pub mod pallet { use super::*; - /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The Weight information for this pallet. type WeightInfo: weights::WeightInfo; @@ -97,7 +86,10 @@ pub mod pallet { /// An origin that can request a preimage be placed on-chain without a deposit or fee, or /// manage existing preimages. - type ManagerOrigin: EnsureOrigin; + type ManagerOrigin: EnsureOrigin; + + /// Max size allowed for a preimage. + type MaxSize: Get; /// The base deposit for placing a preimage on chain. type BaseDeposit: Get>; @@ -108,7 +100,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData); #[pallet::event] @@ -125,7 +116,7 @@ pub mod pallet { #[pallet::error] pub enum Error { /// Preimage is too large to store on-chain. - TooBig, + TooLarge, /// Preimage has already been noted on-chain. AlreadyNoted, /// The user is not authorized to perform this action. 
@@ -143,9 +134,10 @@ pub mod pallet { pub(super) type StatusFor = StorageMap<_, Identity, T::Hash, RequestStatus>>; + /// The preimages stored by this pallet. #[pallet::storage] pub(super) type PreimageFor = - StorageMap<_, Identity, (T::Hash, u32), BoundedVec>>; + StorageMap<_, Identity, T::Hash, BoundedVec>; #[pallet::call] impl Pallet { @@ -158,7 +150,9 @@ pub mod pallet { // We accept a signed origin which will pay a deposit, or a root origin where a deposit // is not taken. let maybe_sender = Self::ensure_signed_or_manager(origin)?; - let (system_requested, _) = Self::note_bytes(bytes.into(), maybe_sender.as_ref())?; + let bounded_vec = + BoundedVec::::try_from(bytes).map_err(|()| Error::::TooLarge)?; + let system_requested = Self::note_bytes(bounded_vec, maybe_sender.as_ref())?; if system_requested || maybe_sender.is_none() { Ok(Pays::No.into()) } else { @@ -167,11 +161,6 @@ pub mod pallet { } /// Clear an unrequested preimage from the runtime storage. - /// - /// If `len` is provided, then it will be a much cheaper operation. - /// - /// - `hash`: The hash of the preimage to be removed from the store. - /// - `len`: The length of the preimage of `hash`. #[pallet::weight(T::WeightInfo::unnote_preimage())] pub fn unnote_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { let maybe_sender = Self::ensure_signed_or_manager(origin)?; @@ -202,9 +191,7 @@ pub mod pallet { impl Pallet { /// Ensure that the origin is either the `ManagerOrigin` or a signed origin. - fn ensure_signed_or_manager( - origin: T::RuntimeOrigin, - ) -> Result, BadOrigin> { + fn ensure_signed_or_manager(origin: T::Origin) -> Result, BadOrigin> { if T::ManagerOrigin::ensure_origin(origin.clone()).is_ok() { return Ok(None) } @@ -214,46 +201,41 @@ impl Pallet { /// Store some preimage on chain. /// - /// If `maybe_depositor` is `None` then it is also requested. If `Some`, then it is not. - /// /// We verify that the preimage is within the bounds of what the pallet supports. 
/// /// If the preimage was requested to be uploaded, then the user pays no deposits or tx fees. fn note_bytes( - preimage: Cow<[u8]>, + preimage: BoundedVec, maybe_depositor: Option<&T::AccountId>, - ) -> Result<(bool, T::Hash), DispatchError> { + ) -> Result { let hash = T::Hashing::hash(&preimage); - let len = preimage.len() as u32; - ensure!(len <= MAX_SIZE, Error::::TooBig); + ensure!(!PreimageFor::::contains_key(hash), Error::::AlreadyNoted); - // We take a deposit only if there is a provided depositor and the preimage was not + // We take a deposit only if there is a provided depositor, and the preimage was not // previously requested. This also allows the tx to pay no fee. - let status = match (StatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested { count, deposit, .. }), _) => - RequestStatus::Requested { count, deposit, len: Some(len) }, - (Some(RequestStatus::Unrequested { .. }), Some(_)) => + let was_requested = match (StatusFor::::get(hash), maybe_depositor) { + (Some(RequestStatus::Requested(..)), _) => true, + (Some(RequestStatus::Unrequested(..)), _) => return Err(Error::::AlreadyNoted.into()), - (Some(RequestStatus::Unrequested { len, deposit }), None) => - RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) }, - (None, None) => RequestStatus::Requested { count: 1, len: Some(len), deposit: None }, + (None, None) => { + StatusFor::::insert(hash, RequestStatus::Unrequested(None)); + false + }, (None, Some(depositor)) => { let length = preimage.len() as u32; let deposit = T::BaseDeposit::get() .saturating_add(T::ByteDeposit::get().saturating_mul(length.into())); T::Currency::reserve(depositor, deposit)?; - RequestStatus::Unrequested { deposit: (depositor.clone(), deposit), len } + let status = RequestStatus::Unrequested(Some((depositor.clone(), deposit))); + StatusFor::::insert(hash, status); + false }, }; - let was_requested = matches!(status, RequestStatus::Requested { .. 
}); - StatusFor::::insert(hash, status); - - let _ = Self::insert(&hash, preimage) - .defensive_proof("Unable to insert. Logic error in `note_bytes`?"); + PreimageFor::::insert(hash, preimage); Self::deposit_event(Event::Noted { hash }); - Ok((was_requested, hash)) + Ok(was_requested) } // This function will add a hash to the list of requested preimages. @@ -261,15 +243,19 @@ impl Pallet { // If the preimage already exists before the request is made, the deposit for the preimage is // returned to the user, and removed from their management. fn do_request_preimage(hash: &T::Hash) { - let (count, len, deposit) = - StatusFor::::get(hash).map_or((1, None, None), |x| match x { - RequestStatus::Requested { mut count, len, deposit } => { - count.saturating_inc(); - (count, len, deposit) - }, - RequestStatus::Unrequested { deposit, len } => (1, Some(len), Some(deposit)), - }); - StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); + let count = StatusFor::::get(hash).map_or(1, |x| match x { + RequestStatus::Requested(mut count) => { + count.saturating_inc(); + count + }, + RequestStatus::Unrequested(None) => 1, + RequestStatus::Unrequested(Some((owner, deposit))) => { + // Return the deposit - the preimage now has outstanding requests. + T::Currency::unreserve(&owner, deposit); + 1 + }, + }); + StatusFor::::insert(hash, RequestStatus::Requested(count)); if count == 1 { Self::deposit_event(Event::Requested { hash: *hash }); } @@ -277,8 +263,6 @@ impl Pallet { // Clear a preimage from the storage of the chain, returning any deposit that may be reserved. // - // If `len` is provided, it will be a much cheaper operation. - // // If `maybe_owner` is provided, we verify that it is the correct owner before clearing the // data. fn do_unnote_preimage( @@ -286,102 +270,51 @@ impl Pallet { maybe_check_owner: Option, ) -> DispatchResult { match StatusFor::::get(hash).ok_or(Error::::NotNoted)? 
{ - RequestStatus::Requested { deposit: Some((owner, deposit)), count, len } => { + RequestStatus::Unrequested(Some((owner, deposit))) => { ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); T::Currency::unreserve(&owner, deposit); - StatusFor::::insert( - hash, - RequestStatus::Requested { deposit: None, count, len }, - ); - Ok(()) }, - RequestStatus::Requested { deposit: None, .. } => { + RequestStatus::Unrequested(None) => { ensure!(maybe_check_owner.is_none(), Error::::NotAuthorized); - Self::do_unrequest_preimage(hash) - }, - RequestStatus::Unrequested { deposit: (owner, deposit), len } => { - ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); - T::Currency::unreserve(&owner, deposit); - StatusFor::::remove(hash); - - Self::remove(hash, len); - Self::deposit_event(Event::Cleared { hash: *hash }); - Ok(()) }, + RequestStatus::Requested(_) => return Err(Error::::Requested.into()), } + StatusFor::::remove(hash); + PreimageFor::::remove(hash); + Self::deposit_event(Event::Cleared { hash: *hash }); + Ok(()) } /// Clear a preimage request. fn do_unrequest_preimage(hash: &T::Hash) -> DispatchResult { match StatusFor::::get(hash).ok_or(Error::::NotRequested)? { - RequestStatus::Requested { mut count, len, deposit } if count > 1 => { + RequestStatus::Requested(mut count) if count > 1 => { count.saturating_dec(); - StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); + StatusFor::::insert(hash, RequestStatus::Requested(count)); }, - RequestStatus::Requested { count, len, deposit } => { + RequestStatus::Requested(count) => { debug_assert!(count == 1, "preimage request counter at zero?"); - match (len, deposit) { - // Preimage was never noted. - (None, _) => StatusFor::::remove(hash), - // Preimage was noted without owner - just remove it. 
- (Some(len), None) => { - Self::remove(hash, len); - StatusFor::::remove(hash); - Self::deposit_event(Event::Cleared { hash: *hash }); - }, - // Preimage was noted with owner - move to unrequested so they can get refund. - (Some(len), Some(deposit)) => { - StatusFor::::insert(hash, RequestStatus::Unrequested { deposit, len }); - }, - } + PreimageFor::::remove(hash); + StatusFor::::remove(hash); + Self::deposit_event(Event::Cleared { hash: *hash }); }, - RequestStatus::Unrequested { .. } => return Err(Error::::NotRequested.into()), + RequestStatus::Unrequested(_) => return Err(Error::::NotRequested.into()), } Ok(()) } - - fn insert(hash: &T::Hash, preimage: Cow<[u8]>) -> Result<(), ()> { - BoundedSlice::>::try_from(preimage.as_ref()) - .map_err(|_| ()) - .map(|s| PreimageFor::::insert((hash, s.len() as u32), s)) - } - - fn remove(hash: &T::Hash, len: u32) { - PreimageFor::::remove((hash, len)) - } - - fn have(hash: &T::Hash) -> bool { - Self::len(hash).is_some() - } - - fn len(hash: &T::Hash) -> Option { - use RequestStatus::*; - match StatusFor::::get(hash) { - Some(Requested { len: Some(len), .. }) | Some(Unrequested { len, .. }) => Some(len), - _ => None, - } - } - - fn fetch(hash: &T::Hash, len: Option) -> FetchResult { - let len = len.or_else(|| Self::len(hash)).ok_or(DispatchError::Unavailable)?; - PreimageFor::::get((hash, len)) - .map(|p| p.into_inner()) - .map(Into::into) - .ok_or(DispatchError::Unavailable) - } } impl PreimageProvider for Pallet { fn have_preimage(hash: &T::Hash) -> bool { - Self::have(hash) + PreimageFor::::contains_key(hash) } fn preimage_requested(hash: &T::Hash) -> bool { - matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. 
})) + matches!(StatusFor::::get(hash), Some(RequestStatus::Requested(..))) } fn get_preimage(hash: &T::Hash) -> Option> { - Self::fetch(hash, None).ok().map(Cow::into_owned) + PreimageFor::::get(hash).map(|preimage| preimage.to_vec()) } fn request_preimage(hash: &T::Hash) { @@ -395,60 +328,15 @@ impl PreimageProvider for Pallet { } impl PreimageRecipient for Pallet { - type MaxSize = ConstU32; // 2**22 + type MaxSize = T::MaxSize; fn note_preimage(bytes: BoundedVec) { // We don't really care if this fails, since that's only the case if someone else has // already noted it. - let _ = Self::note_bytes(bytes.into_inner().into(), None); + let _ = Self::note_bytes(bytes, None); } fn unnote_preimage(hash: &T::Hash) { - // Should never fail if authorization check is skipped. - let res = Self::do_unrequest_preimage(hash); - debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); - } -} - -impl> QueryPreimage for Pallet { - fn len(hash: &T::Hash) -> Option { - Pallet::::len(hash) - } - - fn fetch(hash: &T::Hash, len: Option) -> FetchResult { - Pallet::::fetch(hash, len) - } - - fn is_requested(hash: &T::Hash) -> bool { - matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. })) - } - - fn request(hash: &T::Hash) { - Self::do_request_preimage(hash) - } - - fn unrequest(hash: &T::Hash) { - let res = Self::do_unrequest_preimage(hash); - debug_assert!(res.is_ok(), "do_unrequest_preimage failed - counter underflow?"); - } -} - -impl> StorePreimage for Pallet { - const MAX_LENGTH: usize = MAX_SIZE as usize; - - fn note(bytes: Cow<[u8]>) -> Result { - // We don't really care if this fails, since that's only the case if someone else has - // already noted it. - let maybe_hash = Self::note_bytes(bytes, None).map(|(_, h)| h); - // Map to the correct trait error. 
- if maybe_hash == Err(DispatchError::from(Error::::TooBig)) { - Err(DispatchError::Exhausted) - } else { - maybe_hash - } - } - - fn unnote(hash: &T::Hash) { // Should never fail if authorization check is skipped. let res = Self::do_unnote_preimage(hash, None); debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); diff --git a/frame/preimage/src/migration.rs b/frame/preimage/src/migration.rs deleted file mode 100644 index a5d15c23c758a..0000000000000 --- a/frame/preimage/src/migration.rs +++ /dev/null @@ -1,263 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Storage migrations for the preimage pallet. - -use super::*; -use frame_support::{ - storage_alias, - traits::{ConstU32, OnRuntimeUpgrade}, -}; -use sp_std::collections::btree_map::BTreeMap; - -/// The log target. -const TARGET: &'static str = "runtime::preimage::migration::v1"; - -/// The original data layout of the preimage pallet without a specific version number. 
-mod v0 { - use super::*; - - #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] - pub enum RequestStatus { - Unrequested(Option<(AccountId, Balance)>), - Requested(u32), - } - - #[storage_alias] - pub type PreimageFor = StorageMap< - Pallet, - Identity, - ::Hash, - BoundedVec>, - >; - - #[storage_alias] - pub type StatusFor = StorageMap< - Pallet, - Identity, - ::Hash, - RequestStatus<::AccountId, BalanceOf>, - >; - - /// Returns the number of images or `None` if the storage is corrupted. - #[cfg(feature = "try-runtime")] - pub fn image_count() -> Option { - let images = v0::PreimageFor::::iter_values().count() as u32; - let status = v0::StatusFor::::iter_values().count() as u32; - - if images == status { - Some(images) - } else { - None - } - } -} - -pub mod v1 { - use super::*; - - /// Migration for moving preimage from V0 to V1 storage. - /// - /// Note: This needs to be run with the same hashing algorithm as before - /// since it is not re-hashing the preimages. 
- pub struct Migration(sp_std::marker::PhantomData); - - impl OnRuntimeUpgrade for Migration { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); - - let images = v0::image_count::().expect("v0 storage corrupted"); - log::info!(target: TARGET, "Migrating {} images", &images); - Ok((images as u32).encode()) - } - - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - if StorageVersion::get::>() != 0 { - log::warn!( - target: TARGET, - "skipping MovePreimagesIntoBuckets: executed on wrong storage version.\ - Expected version 0" - ); - return weight - } - - let status = v0::StatusFor::::drain().collect::>(); - weight.saturating_accrue(T::DbWeight::get().reads(status.len() as u64)); - - let preimages = v0::PreimageFor::::drain().collect::>(); - weight.saturating_accrue(T::DbWeight::get().reads(preimages.len() as u64)); - - for (hash, status) in status.into_iter() { - let preimage = if let Some(preimage) = preimages.get(&hash) { - preimage - } else { - log::error!(target: TARGET, "preimage not found for hash {:?}", &hash); - continue - }; - let len = preimage.len() as u32; - if len > MAX_SIZE { - log::error!( - target: TARGET, - "preimage too large for hash {:?}, len: {}", - &hash, - len - ); - continue - } - - let status = match status { - v0::RequestStatus::Unrequested(deposit) => match deposit { - Some(deposit) => RequestStatus::Unrequested { deposit, len }, - // `None` depositor becomes system-requested. 
- None => - RequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, - }, - v0::RequestStatus::Requested(count) if count == 0 => { - log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); - continue - }, - v0::RequestStatus::Requested(count) => - RequestStatus::Requested { deposit: None, count, len: Some(len) }, - }; - log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); - - crate::StatusFor::::insert(hash, status); - crate::PreimageFor::::insert(&(hash, len), preimage); - - weight.saturating_accrue(T::DbWeight::get().writes(2)); - } - StorageVersion::new(1).put::>(); - - weight.saturating_add(T::DbWeight::get().writes(1)) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let old_images: u32 = - Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); - let new_images = image_count::().expect("V1 storage corrupted"); - - if new_images != old_images { - log::error!( - target: TARGET, - "migrated {} images, expected {}", - new_images, - old_images - ); - } - assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); - Ok(()) - } - } - - /// Returns the number of images or `None` if the storage is corrupted. - #[cfg(feature = "try-runtime")] - pub fn image_count() -> Option { - // Use iter_values() to ensure that the values are decodable. 
- let images = crate::PreimageFor::::iter_values().count() as u32; - let status = crate::StatusFor::::iter_values().count() as u32; - - if images == status { - Some(images) - } else { - None - } - } -} - -#[cfg(test)] -#[cfg(feature = "try-runtime")] -mod test { - use super::*; - use crate::mock::{Test as T, *}; - - use frame_support::bounded_vec; - - #[test] - fn migration_works() { - new_test_ext().execute_with(|| { - assert_eq!(StorageVersion::get::>(), 0); - // Insert some preimages into the v0 storage: - - // Case 1: Unrequested without deposit - let (p, h) = preimage::(128); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(None)); - // Case 2: Unrequested with deposit - let (p, h) = preimage::(1024); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(Some((1, 1)))); - // Case 3: Requested by 0 (invalid) - let (p, h) = preimage::(8192); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Requested(0)); - // Case 4: Requested by 10 - let (p, h) = preimage::(65536); - v0::PreimageFor::::insert(h, p); - v0::StatusFor::::insert(h, v0::RequestStatus::Requested(10)); - - assert_eq!(v0::image_count::(), Some(4)); - assert_eq!(v1::image_count::(), None, "V1 storage should be corrupted"); - - let state = v1::Migration::::pre_upgrade().unwrap(); - let _w = v1::Migration::::on_runtime_upgrade(); - v1::Migration::::post_upgrade(state).unwrap(); - - // V0 and V1 share the same prefix, so `iter_values` still counts the same. - assert_eq!(v0::image_count::(), Some(3)); - assert_eq!(v1::image_count::(), Some(3)); // One gets skipped therefore 3. 
- assert_eq!(StorageVersion::get::>(), 1); - - // Case 1: Unrequested without deposit becomes system-requested - let (p, h) = preimage::(128); - assert_eq!(crate::PreimageFor::::get(&(h, 128)), Some(p)); - assert_eq!( - crate::StatusFor::::get(h), - Some(RequestStatus::Requested { deposit: None, count: 1, len: Some(128) }) - ); - // Case 2: Unrequested with deposit becomes unrequested - let (p, h) = preimage::(1024); - assert_eq!(crate::PreimageFor::::get(&(h, 1024)), Some(p)); - assert_eq!( - crate::StatusFor::::get(h), - Some(RequestStatus::Unrequested { deposit: (1, 1), len: 1024 }) - ); - // Case 3: Requested by 0 should be skipped - let (_, h) = preimage::(8192); - assert_eq!(crate::PreimageFor::::get(&(h, 8192)), None); - assert_eq!(crate::StatusFor::::get(h), None); - // Case 4: Requested by 10 becomes requested by 10 - let (p, h) = preimage::(65536); - assert_eq!(crate::PreimageFor::::get(&(h, 65536)), Some(p)); - assert_eq!( - crate::StatusFor::::get(h), - Some(RequestStatus::Requested { deposit: None, count: 10, len: Some(65536) }) - ); - }); - } - - /// Returns a preimage with a given size and its hash. - fn preimage( - len: usize, - ) -> (BoundedVec>, ::Hash) { - let p = bounded_vec![1; len]; - let h = ::Hashing::hash_of(&p); - (p, h) - } -} diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index ce74ea65bd8aa..109806049a0fd 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -50,15 +50,15 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); + frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); } impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -66,7 +66,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -81,7 +81,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; @@ -102,9 +102,10 @@ ord_parameter_types! { impl Config for Test { type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type ManagerOrigin = EnsureSignedBy; + type MaxSize = ConstU32<1024>; type BaseDeposit = ConstU64<2>; type ByteDeposit = ConstU64<1>; } diff --git a/frame/preimage/src/tests.rs b/frame/preimage/src/tests.rs index f480b9c36b670..721bb128de239 100644 --- a/frame/preimage/src/tests.rs +++ b/frame/preimage/src/tests.rs @@ -17,40 +17,16 @@ //! # Scheduler tests. 
-#![cfg(test)] - use super::*; use crate::mock::*; -use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_vec, - traits::{Bounded, BoundedInline, Hash as PreimageHash}, - StorageNoopGuard, -}; +use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; -use sp_core::{blake2_256, H256}; - -/// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. -pub fn make_bounded_values() -> (Bounded>, Bounded>, Bounded>) { - let data: BoundedInline = bounded_vec![1]; - let inline = Bounded::>::Inline(data); - - let data = vec![1, 2]; - let hash: H256 = blake2_256(&data[..]).into(); - let len = data.len() as u32; - let lookup = Bounded::>::unrequested(hash, len); - - let data = vec![1, 2, 3]; - let hash: H256 = blake2_256(&data[..]).into(); - let legacy = Bounded::>::Legacy { hash, dummy: Default::default() }; - - (inline, lookup, legacy) -} #[test] fn user_note_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); assert_eq!(Balances::reserved_balance(2), 3); assert_eq!(Balances::free_balance(2), 97); @@ -59,11 +35,11 @@ fn user_note_preimage_works() { assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); assert_noop!( - Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1]), + Preimage::note_preimage(Origin::signed(2), vec![1]), Error::::AlreadyNoted ); assert_noop!( - Preimage::note_preimage(RuntimeOrigin::signed(0), vec![2]), + Preimage::note_preimage(Origin::signed(0), vec![2]), BalancesError::::InsufficientBalance ); }); @@ -72,7 +48,7 @@ fn user_note_preimage_works() { #[test] fn manager_note_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); assert_eq!(Balances::reserved_balance(1), 0); 
assert_eq!(Balances::free_balance(1), 100); @@ -80,26 +56,29 @@ fn manager_note_preimage_works() { assert!(Preimage::have_preimage(&h)); assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_noop!( + Preimage::note_preimage(Origin::signed(1), vec![1]), + Error::::AlreadyNoted + ); }); } #[test] fn user_unnote_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(3), hashed([1])), + Preimage::unnote_preimage(Origin::signed(3), hashed([1])), Error::::NotAuthorized ); assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([2])), + Preimage::unnote_preimage(Origin::signed(2), hashed([2])), Error::::NotNoted ); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(Origin::signed(2), hashed([1]))); assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1])), + Preimage::unnote_preimage(Origin::signed(2), hashed([1])), Error::::NotNoted ); @@ -112,10 +91,10 @@ fn user_unnote_preimage_works() { #[test] fn manager_unnote_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); + assert_ok!(Preimage::unnote_preimage(Origin::signed(1), hashed([1]))); assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1])), + Preimage::unnote_preimage(Origin::signed(1), hashed([1])), Error::::NotNoted ); @@ -128,17 +107,17 @@ fn manager_unnote_preimage_works() { #[test] fn manager_unnote_user_preimage_works() { new_test_ext().execute_with(|| { - 
assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(3), hashed([1])), + Preimage::unnote_preimage(Origin::signed(3), hashed([1])), Error::::NotAuthorized ); assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([2])), + Preimage::unnote_preimage(Origin::signed(2), hashed([2])), Error::::NotNoted ); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(Origin::signed(1), hashed([1]))); let h = hashed([1]); assert!(!Preimage::have_preimage(&h)); @@ -149,35 +128,32 @@ fn manager_unnote_user_preimage_works() { #[test] fn requested_then_noted_preimage_cannot_be_unnoted() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); - // it's still here. 
+ assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_noop!( + Preimage::unnote_preimage(Origin::signed(1), hashed([1])), + Error::::Requested + ); let h = hashed([1]); assert!(Preimage::have_preimage(&h)); assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - - // now it's gone - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert!(!Preimage::have_preimage(&hashed([1]))); }); } #[test] fn request_note_order_makes_no_difference() { let one_way = new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -189,8 +165,8 @@ fn request_note_order_makes_no_difference() { #[test] fn requested_then_user_noted_preimage_is_free() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); assert_eq!(Balances::reserved_balance(2), 0); 
assert_eq!(Balances::free_balance(2), 100); @@ -203,17 +179,16 @@ fn requested_then_user_noted_preimage_is_free() { #[test] fn request_user_note_order_makes_no_difference() { let one_way = new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -225,20 +200,20 @@ fn request_user_note_order_makes_no_difference() { #[test] fn unrequest_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); assert_noop!( - Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([2])), + Preimage::unrequest_preimage(Origin::signed(1), hashed([2])), Error::::NotRequested ); - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); + 
assert_ok!(Preimage::unrequest_preimage(Origin::signed(1), hashed([1]))); assert!(Preimage::have_preimage(&hashed([1]))); - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unrequest_preimage(Origin::signed(1), hashed([1]))); assert_noop!( - Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1])), + Preimage::unrequest_preimage(Origin::signed(1), hashed([1])), Error::::NotRequested ); }); @@ -247,244 +222,12 @@ fn unrequest_preimage_works() { #[test] fn user_noted_then_requested_preimage_is_refunded_once_only() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1; 3])); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1; 3])); + assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::unrequest_preimage(Origin::signed(1), hashed([1]))); // Still have reserve from `vec[1; 3]`. assert_eq!(Balances::reserved_balance(2), 5); assert_eq!(Balances::free_balance(2), 95); }); } - -#[test] -fn noted_preimage_use_correct_map() { - new_test_ext().execute_with(|| { - // Add one preimage per bucket... - for i in 0..7 { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; 128 << (i * 2)])); - } - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; MAX_SIZE as usize])); - assert_eq!(PreimageFor::::iter().count(), 8); - - // All are present - assert_eq!(StatusFor::::iter().count(), 8); - - // Now start removing them again... 
- for i in 0..7 { - assert_ok!(Preimage::unnote_preimage( - RuntimeOrigin::signed(1), - hashed(vec![0; 128 << (i * 2)]) - )); - } - assert_eq!(PreimageFor::::iter().count(), 1); - assert_ok!(Preimage::unnote_preimage( - RuntimeOrigin::signed(1), - hashed(vec![0; MAX_SIZE as usize]) - )); - assert_eq!(PreimageFor::::iter().count(), 0); - - // All are gone - assert_eq!(StatusFor::::iter().count(), 0); - }); -} - -/// The `StorePreimage` and `QueryPreimage` traits work together. -#[test] -fn query_and_store_preimage_workflow() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let data: Vec = vec![1; 512]; - let encoded = data.encode(); - - // Bound an unbound value. - let bound = Preimage::bound(data.clone()).unwrap(); - let (len, hash) = (bound.len().unwrap(), bound.hash()); - - assert_eq!(hash, blake2_256(&encoded).into()); - assert_eq!(bound.len(), Some(len)); - assert!(bound.lookup_needed(), "Should not be Inlined"); - assert_eq!(bound.lookup_len(), Some(len)); - - // The value is requested and available. - assert!(Preimage::is_requested(&hash)); - assert!(::have(&bound)); - assert_eq!(Preimage::len(&hash), Some(len)); - - // It can be fetched with length. - assert_eq!(Preimage::fetch(&hash, Some(len)).unwrap(), encoded); - // ... and without length. - assert_eq!(Preimage::fetch(&hash, None).unwrap(), encoded); - // ... but not with wrong length. - assert_err!(Preimage::fetch(&hash, Some(0)), DispatchError::Unavailable); - - // It can be peeked and decoded correctly. - assert_eq!(Preimage::peek::>(&bound).unwrap(), (data.clone(), Some(len))); - // Request it two more times. - assert_eq!(Preimage::pick::>(hash, len), bound); - Preimage::request(&hash); - // It is requested thrice. - assert!(matches!( - StatusFor::::get(&hash).unwrap(), - RequestStatus::Requested { count: 3, .. } - )); - - // It can be realized and decoded correctly. 
- assert_eq!(Preimage::realize::>(&bound).unwrap(), (data.clone(), Some(len))); - assert!(matches!( - StatusFor::::get(&hash).unwrap(), - RequestStatus::Requested { count: 2, .. } - )); - // Dropping should unrequest. - Preimage::drop(&bound); - assert!(matches!( - StatusFor::::get(&hash).unwrap(), - RequestStatus::Requested { count: 1, .. } - )); - - // Is still available. - assert!(::have(&bound)); - // Manually unnote it. - Preimage::unnote(&hash); - // Is not available anymore. - assert!(!::have(&bound)); - assert_err!(Preimage::fetch(&hash, Some(len)), DispatchError::Unavailable); - // And not requested since the traits assume permissioned origin. - assert!(!Preimage::is_requested(&hash)); - - // No storage changes remain. Checked by `StorageNoopGuard`. - }); -} - -/// The request function behaves as expected. -#[test] -fn query_preimage_request_works() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let data: Vec = vec![1; 10]; - let hash: PreimageHash = blake2_256(&data[..]).into(); - - // Request the preimage. - ::request(&hash); - - // The preimage is requested with unknown length and cannot be fetched. - assert!(::is_requested(&hash)); - assert!(::len(&hash).is_none()); - assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); - - // Request again. - ::request(&hash); - // The preimage is still requested. - assert!(::is_requested(&hash)); - assert!(::len(&hash).is_none()); - assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); - // But there is only one entry in the map. - assert_eq!(StatusFor::::iter().count(), 1); - - // Un-request the preimage. - ::unrequest(&hash); - // It is still requested. - assert!(::is_requested(&hash)); - // Un-request twice. - ::unrequest(&hash); - // It is not requested anymore. - assert!(!::is_requested(&hash)); - // And there is no entry in the map. 
- assert_eq!(StatusFor::::iter().count(), 0); - }); -} - -/// The `QueryPreimage` functions can be used together with `Bounded` values. -#[test] -fn query_preimage_hold_and_drop_work() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let (inline, lookup, legacy) = make_bounded_values(); - - // `hold` does nothing for `Inline` values. - assert_storage_noop!(::hold(&inline)); - // `hold` requests `Lookup` values. - ::hold(&lookup); - assert!(::is_requested(&lookup.hash())); - // `hold` requests `Legacy` values. - ::hold(&legacy); - assert!(::is_requested(&legacy.hash())); - - // There are two values requested in total. - assert_eq!(StatusFor::::iter().count(), 2); - - // Cleanup by dropping both. - ::drop(&lookup); - assert!(!::is_requested(&lookup.hash())); - ::drop(&legacy); - assert!(!::is_requested(&legacy.hash())); - - // There are no values requested anymore. - assert_eq!(StatusFor::::iter().count(), 0); - }); -} - -/// The `StorePreimage` trait works as expected. -#[test] -fn store_preimage_basic_works() { - new_test_ext().execute_with(|| { - let _guard = StorageNoopGuard::default(); - let data: Vec = vec![1; 512]; // Too large to inline. - let encoded = Cow::from(data.encode()); - - // Bound the data. - let bound = ::bound(data.clone()).unwrap(); - // The preimage can be peeked. - assert_ok!(::peek(&bound)); - // Un-note the preimage. - ::unnote(&bound.hash()); - // The preimage cannot be peeked anymore. - assert_err!(::peek(&bound), DispatchError::Unavailable); - // Noting the wrong pre-image does not make it peek-able. - assert_ok!(::note(Cow::Borrowed(&data))); - assert_err!(::peek(&bound), DispatchError::Unavailable); - - // Manually note the preimage makes it peek-able again. - assert_ok!(::note(encoded.clone())); - // Noting again works. - assert_ok!(::note(encoded)); - assert_ok!(::peek(&bound)); - - // Cleanup. 
- ::unnote(&bound.hash()); - let data_hash = blake2_256(&data); - ::unnote(&data_hash.into()); - - // No storage changes remain. Checked by `StorageNoopGuard`. - }); -} - -#[test] -fn store_preimage_note_too_large_errors() { - new_test_ext().execute_with(|| { - // Works with `MAX_LENGTH`. - let len = ::MAX_LENGTH; - let data = vec![0u8; len]; - assert_ok!(::note(data.into())); - - // Errors with `MAX_LENGTH+1`. - let data = vec![0u8; len + 1]; - assert_err!(::note(data.into()), DispatchError::Exhausted); - }); -} - -#[test] -fn store_preimage_bound_too_large_errors() { - new_test_ext().execute_with(|| { - // Using `MAX_LENGTH` number of bytes in a vector does not work - // since SCALE prepends the length. - let len = ::MAX_LENGTH; - let data: Vec = vec![0; len]; - assert_err!(::bound(data.clone()), DispatchError::Exhausted); - - // Works with `MAX_LENGTH-4`. - let data: Vec = vec![0; len - 4]; - assert_ok!(::bound(data.clone())); - }); -} diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index e73c986891ccd..de3eb6607fe8c 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/preimage/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/preimage/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,206 +61,178 @@ pub trait WeightInfo { /// Weights for pallet_preimage using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) - /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { - // Minimum execution time: 33_810 nanoseconds. - Weight::from_ref_time(34_299_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) - /// The range of component `s` is `[0, 4194304]`. + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - // Minimum execution time: 24_398 nanoseconds. 
- Weight::from_ref_time(24_839_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_702 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) - /// The range of component `s` is `[0, 4194304]`. + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - // Minimum execution time: 22_235 nanoseconds. - Weight::from_ref_time(22_473_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - // Minimum execution time: 43_241 nanoseconds. - Weight::from_ref_time(44_470_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (44_380_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - // Minimum execution time: 29_529 nanoseconds. 
- Weight::from_ref_time(30_364_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (30_280_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - // Minimum execution time: 28_914 nanoseconds. - Weight::from_ref_time(30_103_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (42_809_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - // Minimum execution time: 14_479 nanoseconds. - Weight::from_ref_time(15_244_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (28_964_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - // Minimum execution time: 20_171 nanoseconds. - Weight::from_ref_time(20_806_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (17_555_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - // Minimum execution time: 9_756 nanoseconds. 
- Weight::from_ref_time(10_115_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (7_745_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - // Minimum execution time: 28_379 nanoseconds. - Weight::from_ref_time(29_778_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (29_758_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - // Minimum execution time: 9_595 nanoseconds. - Weight::from_ref_time(9_888_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_360_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - // Minimum execution time: 9_642 nanoseconds. - Weight::from_ref_time(9_985_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (7_439_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { + // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) - /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { - // Minimum execution time: 33_810 nanoseconds. 
- Weight::from_ref_time(34_299_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) - /// The range of component `s` is `[0, 4194304]`. + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - // Minimum execution time: 24_398 nanoseconds. - Weight::from_ref_time(24_839_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_702 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) - /// The range of component `s` is `[0, 4194304]`. + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - // Minimum execution time: 22_235 nanoseconds. 
- Weight::from_ref_time(22_473_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - // Minimum execution time: 43_241 nanoseconds. - Weight::from_ref_time(44_470_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (44_380_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - // Minimum execution time: 29_529 nanoseconds. - Weight::from_ref_time(30_364_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (30_280_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - // Minimum execution time: 28_914 nanoseconds. - Weight::from_ref_time(30_103_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (42_809_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - // Minimum execution time: 14_479 nanoseconds. 
- Weight::from_ref_time(15_244_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (28_964_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - // Minimum execution time: 20_171 nanoseconds. - Weight::from_ref_time(20_806_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (17_555_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - // Minimum execution time: 9_756 nanoseconds. - Weight::from_ref_time(10_115_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (7_745_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - // Minimum execution time: 28_379 nanoseconds. - Weight::from_ref_time(29_778_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (29_758_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - // Minimum execution time: 9_595 nanoseconds. 
- Weight::from_ref_time(9_888_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_360_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - // Minimum execution time: 9_642 nanoseconds. - Weight::from_ref_time(9_985_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (7_439_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index afec89ad40fb8..aaacaa23021e7 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -30,7 +30,6 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 58c0cb73011df..87017290a3ab9 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -27,7 +27,7 @@ use sp_runtime::traits::Bounded; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -35,11 +35,9 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), let caller = maybe_who.unwrap_or_else(whitelisted_caller); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); for i in 0..n { - let real = T::Lookup::unlookup(account("target", i, SEED)); - Proxy::::add_proxy( RawOrigin::Signed(caller.clone()).into(), - real, + account("target", i, SEED), T::ProxyType::default(), T::BlockNumber::zero(), )?; 
@@ -53,7 +51,6 @@ fn add_announcements( maybe_real: Option, ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); - let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); let real = if let Some(real) = maybe_real { real @@ -62,17 +59,16 @@ fn add_announcements( T::Currency::make_free_balance_be(&real, BalanceOf::::max_value() / 2u32.into()); Proxy::::add_proxy( RawOrigin::Signed(real.clone()).into(), - caller_lookup, + caller.clone(), T::ProxyType::default(), T::BlockNumber::zero(), )?; real }; - let real_lookup = T::Lookup::unlookup(real); for _ in 0..n { Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real_lookup.clone(), + real.clone(), T::CallHasher::hash_of(&("add_announcement", n)), )?; } @@ -87,9 +83,8 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); - }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } @@ -98,21 +93,19 @@ benchmarks! { let a in 0 .. T::MaxPending::get() - 1; let p in 1 .. 
(T::MaxProxies::get() - 1) => add_proxies::(p, None)?; // In this case the caller is the "target" proxy - let caller: T::AccountId = account("pure", 0, SEED); + let caller: T::AccountId = account("anonymous", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); - let delegate_lookup = T::Lookup::unlookup(delegate.clone()); T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), - real_lookup.clone(), + real.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(delegate.clone()), None)?; - }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) + }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } @@ -125,15 +118,14 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); - let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real_lookup.clone(), + real.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) + }: _(RawOrigin::Signed(caller.clone()), real, T::CallHasher::hash_of(&call)) verify { let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); @@ -144,19 +136,17 @@ benchmarks! { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); - let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let real_lookup = T::Lookup::unlookup(real.clone()); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real_lookup, + real.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) + }: _(RawOrigin::Signed(real), caller.clone(), T::CallHasher::hash_of(&call)) verify { let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); @@ -170,11 +160,10 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... 
and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); - }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) + }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) verify { assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); } @@ -182,10 +171,9 @@ benchmarks! { add_proxy { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); }: _( RawOrigin::Signed(caller.clone()), - real, + account("target", T::MaxProxies::get(), SEED), T::ProxyType::default(), T::BlockNumber::zero() ) @@ -197,10 +185,9 @@ benchmarks! { remove_proxy { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - let delegate = T::Lookup::unlookup(account("target", 0, SEED)); }: _( RawOrigin::Signed(caller.clone()), - delegate, + account("target", 0, SEED), T::ProxyType::default(), T::BlockNumber::zero() ) @@ -218,7 +205,7 @@ benchmarks! { assert_eq!(proxies.len() as u32, 0); } - create_pure { + anonymous { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( @@ -228,22 +215,21 @@ benchmarks! 
{ 0 ) verify { - let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(Event::PureCreated { - pure: pure_account, + let anon_account = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(Event::AnonymousCreated { + anonymous: anon_account, who: caller, proxy_type: T::ProxyType::default(), disambiguation_index: 0, }.into()); } - kill_pure { + kill_anonymous { let p in 0 .. (T::MaxProxies::get() - 2); let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Pallet::::create_pure( + Pallet::::anonymous( RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), T::BlockNumber::zero(), @@ -251,13 +237,13 @@ benchmarks! { )?; let height = system::Pallet::::block_number(); let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); - let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); + let anon = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); - add_proxies::(p, Some(pure_account.clone()))?; - ensure!(Proxies::::contains_key(&pure_account), "pure proxy not created"); - }: _(RawOrigin::Signed(pure_account.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) + add_proxies::(p, Some(anon.clone()))?; + ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); + }: _(RawOrigin::Signed(anon.clone()), caller.clone(), T::ProxyType::default(), 0, height, ext_index) verify { - assert!(!Proxies::::contains_key(&pure_account)); + assert!(!Proxies::::contains_key(&anon)); } impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 5c07a2b012243..9945626efbeb1 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -35,16 +35,17 @@ pub mod weights; use 
codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::{DispatchError, GetDispatchInfo}, + dispatch::DispatchError, ensure, traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, + weights::GetDispatchInfo, RuntimeDebug, }; use frame_system::{self as system}; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, + traits::{Dispatchable, Hash, Saturating, TrailingZeroInput, Zero}, DispatchResult, }; use sp_std::prelude::*; @@ -57,8 +58,6 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - /// The parameters under which a particular account has a proxy relationship with some other /// account. #[derive( @@ -109,15 +108,15 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The overarching call type. - type RuntimeCall: Parameter - + Dispatchable + type Call: Parameter + + Dispatchable + GetDispatchInfo + From> + IsSubType> - + IsType<::RuntimeCall>; + + IsType<::Call>; /// The currency mechanism. type Currency: ReservableCurrency; @@ -130,7 +129,7 @@ pub mod pallet { + Member + Ord + PartialOrd - + InstanceFilter<::RuntimeCall> + + InstanceFilter<::Call> + Default + MaxEncodedLen; @@ -191,22 +190,25 @@ pub mod pallet { /// - `real`: The account that the proxy will make a call on behalf of. /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. + /// + /// # + /// Weight is a function of the number of proxies the user has (P). 
+ /// # #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get()) + .saturating_add(di.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(di.weight), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) })] pub fn proxy( origin: OriginFor, - real: AccountIdLookupOf, + real: T::AccountId, force_proxy_type: Option, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; - let real = T::Lookup::lookup(real)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; ensure!(def.delay.is_zero(), Error::::Unannounced); @@ -224,15 +226,18 @@ pub mod pallet { /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be /// zero. + /// + /// # + /// Weight is a function of the number of proxies the user has (P). + /// # #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] pub fn add_proxy( origin: OriginFor, - delegate: AccountIdLookupOf, + delegate: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; - let delegate = T::Lookup::lookup(delegate)?; Self::add_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -243,15 +248,18 @@ pub mod pallet { /// Parameters: /// - `proxy`: The account that the `caller` would like to remove as a proxy. /// - `proxy_type`: The permissions currently enabled for the removed proxy account. + /// + /// # + /// Weight is a function of the number of proxies the user has (P). 
+ /// # #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] pub fn remove_proxy( origin: OriginFor, - delegate: AccountIdLookupOf, + delegate: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; - let delegate = T::Lookup::lookup(delegate)?; Self::remove_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -259,8 +267,12 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_. /// - /// WARNING: This may be called on accounts created by `pure`, however if done, then + /// WARNING: This may be called on accounts created by `anonymous`, however if done, then /// the unreserved fees will be inaccessible. **All access to this account will be lost.** + /// + /// # + /// Weight is a function of the number of proxies the user has (P). + /// # #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get()))] pub fn remove_proxies(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -288,8 +300,13 @@ pub mod pallet { /// same sender, with the same parameters. /// /// Fails if there are insufficient funds to pay for deposit. - #[pallet::weight(T::WeightInfo::create_pure(T::MaxProxies::get()))] - pub fn create_pure( + /// + /// # + /// Weight is a function of the number of proxies the user has (P). 
+ /// # + /// TODO: Might be over counting 1 read + #[pallet::weight(T::WeightInfo::anonymous(T::MaxProxies::get()))] + pub fn anonymous( origin: OriginFor, proxy_type: T::ProxyType, delay: T::BlockNumber, @@ -297,8 +314,8 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - let pure = Self::pure_account(&who, &proxy_type, index, None); - ensure!(!Proxies::::contains_key(&pure), Error::::Duplicate); + let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); + ensure!(!Proxies::::contains_key(&anonymous), Error::::Duplicate); let proxy_def = ProxyDefinition { delegate: who.clone(), proxy_type: proxy_type.clone(), delay }; @@ -308,9 +325,9 @@ pub mod pallet { let deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get(); T::Currency::reserve(&who, deposit)?; - Proxies::::insert(&pure, (bounded_proxies, deposit)); - Self::deposit_event(Event::PureCreated { - pure, + Proxies::::insert(&anonymous, (bounded_proxies, deposit)); + Self::deposit_event(Event::AnonymousCreated { + anonymous, who, proxy_type, disambiguation_index: index, @@ -319,36 +336,39 @@ pub mod pallet { Ok(()) } - /// Removes a previously spawned pure proxy. + /// Removes a previously spawned anonymous proxy. /// /// WARNING: **All access to this account will be lost.** Any funds held in it will be /// inaccessible. /// /// Requires a `Signed` origin, and the sender account must have been created by a call to - /// `pure` with corresponding parameters. - /// - /// - `spawner`: The account that originally called `pure` to create this account. - /// - `index`: The disambiguation index originally passed to `pure`. Probably `0`. - /// - `proxy_type`: The proxy type originally passed to `pure`. - /// - `height`: The height of the chain when the call to `pure` was processed. - /// - `ext_index`: The extrinsic index in which the call to `pure` was processed. 
- /// - /// Fails with `NoPermission` in case the caller is not a previously created pure - /// account whose `pure` call has corresponding parameters. - #[pallet::weight(T::WeightInfo::kill_pure(T::MaxProxies::get()))] - pub fn kill_pure( + /// `anonymous` with corresponding parameters. + /// + /// - `spawner`: The account that originally called `anonymous` to create this account. + /// - `index`: The disambiguation index originally passed to `anonymous`. Probably `0`. + /// - `proxy_type`: The proxy type originally passed to `anonymous`. + /// - `height`: The height of the chain when the call to `anonymous` was processed. + /// - `ext_index`: The extrinsic index in which the call to `anonymous` was processed. + /// + /// Fails with `NoPermission` in case the caller is not a previously created anonymous + /// account whose `anonymous` call has corresponding parameters. + /// + /// # + /// Weight is a function of the number of proxies the user has (P). + /// # + #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get()))] + pub fn kill_anonymous( origin: OriginFor, - spawner: AccountIdLookupOf, + spawner: T::AccountId, proxy_type: T::ProxyType, index: u16, #[pallet::compact] height: T::BlockNumber, #[pallet::compact] ext_index: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; - let spawner = T::Lookup::lookup(spawner)?; let when = (height, ext_index); - let proxy = Self::pure_account(&spawner, &proxy_type, index, Some(when)); + let proxy = Self::anonymous_account(&spawner, &proxy_type, index, Some(when)); ensure!(proxy == who, Error::::NoPermission); let (_, deposit) = Proxies::::take(&who); @@ -372,14 +392,19 @@ pub mod pallet { /// Parameters: /// - `real`: The account that the proxy will make a call on behalf of. /// - `call_hash`: The hash of the call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. 
+ /// # #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get()))] pub fn announce( origin: OriginFor, - real: AccountIdLookupOf, + real: T::AccountId, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; - let real = T::Lookup::lookup(real)?; Proxies::::get(&real) .0 .into_iter() @@ -421,17 +446,22 @@ pub mod pallet { /// Parameters: /// - `real`: The account that the proxy will make a call on behalf of. /// - `call_hash`: The hash of the call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # #[pallet::weight(T::WeightInfo::remove_announcement( T::MaxPending::get(), T::MaxProxies::get() ))] pub fn remove_announcement( origin: OriginFor, - real: AccountIdLookupOf, + real: T::AccountId, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; - let real = T::Lookup::lookup(real)?; Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; Ok(()) @@ -447,17 +477,22 @@ pub mod pallet { /// Parameters: /// - `delegate`: The account that previously announced the call. /// - `call_hash`: The hash of the call to be made. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # #[pallet::weight(T::WeightInfo::reject_announcement( T::MaxPending::get(), T::MaxProxies::get() ))] pub fn reject_announcement( origin: OriginFor, - delegate: AccountIdLookupOf, + delegate: T::AccountId, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; - let delegate = T::Lookup::lookup(delegate)?; Self::edit_announcements(&delegate, |ann| { ann.real != who || ann.call_hash != call_hash })?; @@ -476,24 +511,28 @@ pub mod pallet { /// - `real`: The account that the proxy will make a call on behalf of. 
/// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) + .saturating_add(di.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(di.weight), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) })] pub fn proxy_announced( origin: OriginFor, - delegate: AccountIdLookupOf, - real: AccountIdLookupOf, + delegate: T::AccountId, + real: T::AccountId, force_proxy_type: Option, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResult { ensure_signed(origin)?; - let delegate = T::Lookup::lookup(delegate)?; - let real = T::Lookup::lookup(real)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); @@ -516,10 +555,10 @@ pub mod pallet { pub enum Event { /// A proxy was executed correctly, with the given. ProxyExecuted { result: DispatchResult }, - /// A pure account has been created by new proxy with given + /// Anonymous account has been created by new proxy with given /// disambiguation index and proxy type. - PureCreated { - pure: T::AccountId, + AnonymousCreated { + anonymous: T::AccountId, who: T::AccountId, proxy_type: T::ProxyType, disambiguation_index: u16, @@ -593,7 +632,7 @@ pub mod pallet { } impl Pallet { - /// Calculate the address of an pure account. + /// Calculate the address of an anonymous account. /// /// - `who`: The spawner account. 
/// - `proxy_type`: The type of the proxy that the sender will be registered as over the @@ -602,9 +641,9 @@ impl Pallet { /// - `index`: A disambiguation index, in case this is called multiple times in the same /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just /// want to use `0`. - /// - `maybe_when`: The block height and extrinsic index of when the pure account was + /// - `maybe_when`: The block height and extrinsic index of when the anonymous account was /// created. None to use current block height and extrinsic index. - pub fn pure_account( + pub fn anonymous_account( who: &T::AccountId, proxy_type: &T::ProxyType, index: u16, @@ -767,12 +806,12 @@ impl Pallet { fn do_proxy( def: ProxyDefinition, real: T::AccountId, - call: ::RuntimeCall, + call: ::Call, ) { // This is a freshly authenticated new account, the origin restrictions doesn't apply. - let mut origin: T::RuntimeOrigin = frame_system::RawOrigin::Signed(real).into(); - origin.add_filter(move |c: &::RuntimeCall| { - let c = ::RuntimeCall::from_ref(c); + let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); + origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); // We make sure the proxy call does access this pallet to change modify proxies. match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already @@ -781,9 +820,9 @@ impl Pallet { Some(Call::remove_proxy { ref proxy_type, .. }) if !def.proxy_type.is_superset(proxy_type) => false, - // Proxy call cannot remove all proxies or kill pure proxies unless it has full + // Proxy call cannot remove all proxies or kill anonymous proxies unless it has full // permissions. - Some(Call::remove_proxies { .. }) | Some(Call::kill_pure { .. }) + Some(Call::remove_proxies { .. }) | Some(Call::kill_anonymous { .. 
}) if def.proxy_type != T::ProxyType::default() => false, _ => def.proxy_type.filter(c), diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 17bc7bcb92ee1..a0807f1d3d0b6 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -54,23 +54,23 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -88,15 +88,15 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } impl pallet_utility::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type PalletsOrigin = OriginCaller; type WeightInfo = (); } @@ -124,14 +124,14 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { +impl InstanceFilter for ProxyType { + fn filter(&self, c: &Call) -> bool { match self { ProxyType::Any => true, ProxyType::JustTransfer => { - matches!(c, RuntimeCall::Balances(pallet_balances::Call::transfer { .. 
})) + matches!(c, Call::Balances(pallet_balances::Call::transfer { .. })) }, - ProxyType::JustUtility => matches!(c, RuntimeCall::Utility { .. }), + ProxyType::JustUtility => matches!(c, Call::Utility { .. }), } } fn is_superset(&self, o: &Self) -> bool { @@ -139,19 +139,19 @@ impl InstanceFilter for ProxyType { } } pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(c: &RuntimeCall) -> bool { +impl Contains for BaseFilter { + fn contains(c: &Call) -> bool { match *c { // Remark is used as a no-op call in the benchmarking - RuntimeCall::System(SystemCall::remark { .. }) => true, - RuntimeCall::System(_) => false, + Call::System(SystemCall::remark { .. }) => true, + Call::System(_) => false, _ => true, } } } impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type Currency = Balances; type ProxyType = ProxyType; type ProxyDepositBase = ConstU64<1>; @@ -183,7 +183,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn last_events(n: usize) -> Vec { +fn last_events(n: usize) -> Vec { system::Pallet::::events() .into_iter() .rev() @@ -193,18 +193,18 @@ fn last_events(n: usize) -> Vec { .collect() } -fn expect_events(e: Vec) { +fn expect_events(e: Vec) { assert_eq!(last_events(e.len()), e); } -fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) } #[test] fn announcement_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); System::assert_last_event( ProxyEvent::ProxyAdded { delegator: 1, @@ -214,10 +214,10 @@ fn announcement_works() { } .into(), ); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); + 
assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); assert_eq!(Balances::reserved_balance(3), 0); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -225,7 +225,7 @@ fn announcement_works() { ); assert_eq!(Balances::reserved_balance(3), announcements.1); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -236,23 +236,20 @@ fn announcement_works() { ); assert_eq!(Balances::reserved_balance(3), announcements.1); - assert_noop!( - Proxy::announce(RuntimeOrigin::signed(3), 2, [3; 32].into()), - Error::::TooMany - ); + assert_noop!(Proxy::announce(Origin::signed(3), 2, [3; 32].into()), Error::::TooMany); }); } #[test] fn remove_announcement_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); let e = Error::::NotFound; - assert_noop!(Proxy::remove_announcement(RuntimeOrigin::signed(3), 1, [0; 32].into()), e); - assert_ok!(Proxy::remove_announcement(RuntimeOrigin::signed(3), 1, [1; 32].into())); + assert_noop!(Proxy::remove_announcement(Origin::signed(3), 1, [0; 32].into()), e); + assert_ok!(Proxy::remove_announcement(Origin::signed(3), 1, 
[1; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -265,15 +262,15 @@ fn remove_announcement_works() { #[test] fn reject_announcement_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); let e = Error::::NotFound; - assert_noop!(Proxy::reject_announcement(RuntimeOrigin::signed(1), 3, [0; 32].into()), e); + assert_noop!(Proxy::reject_announcement(Origin::signed(1), 3, [0; 32].into()), e); let e = Error::::NotFound; - assert_noop!(Proxy::reject_announcement(RuntimeOrigin::signed(4), 3, [1; 32].into()), e); - assert_ok!(Proxy::reject_announcement(RuntimeOrigin::signed(1), 3, [1; 32].into())); + assert_noop!(Proxy::reject_announcement(Origin::signed(4), 3, [1; 32].into()), e); + assert_ok!(Proxy::reject_announcement(Origin::signed(1), 3, [1; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -286,44 +283,41 @@ fn reject_announcement_works() { #[test] fn announcer_must_be_proxy() { new_test_ext().execute_with(|| { - assert_noop!( - Proxy::announce(RuntimeOrigin::signed(2), 1, H256::zero()), - Error::::NotProxy - ); + assert_noop!(Proxy::announce(Origin::signed(2), 1, H256::zero()), Error::::NotProxy); }); } #[test] fn delayed_requires_pre_announcement() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 1)); + 
assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 1)); let call = Box::new(call_transfer(6, 1)); let e = Error::::Unannounced; - assert_noop!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone()), e); + assert_noop!(Proxy::proxy(Origin::signed(2), 1, None, call.clone()), e); let e = Error::::Unannounced; - assert_noop!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone()), e); + assert_noop!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone()), e); let call_hash = BlakeTwo256::hash_of(&call); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); + assert_ok!(Proxy::announce(Origin::signed(2), 1, call_hash)); system::Pallet::::set_block_number(2); - assert_ok!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone())); + assert_ok!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone())); }); } #[test] fn proxy_announced_removes_announcement_and_returns_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); let call = Box::new(call_transfer(6, 1)); let call_hash = BlakeTwo256::hash_of(&call); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, call_hash)); - assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, call_hash)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, call_hash)); + assert_ok!(Proxy::announce(Origin::signed(3), 2, call_hash)); // Too early to execute announced call let e = Error::::Unannounced; - assert_noop!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone()), e); + assert_noop!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone()), e); system::Pallet::::set_block_number(2); - 
assert_ok!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone())); + assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); let announcements = Announcements::::get(3); assert_eq!(announcements.0, vec![Announcement { real: 2, call_hash, height: 1 }]); assert_eq!(Balances::reserved_balance(3), announcements.1); @@ -334,16 +328,16 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { fn filtering_works() { new_test_ext().execute_with(|| { assert!(Balances::mutate_account(&1, |a| a.free = 1000).is_ok()); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::JustTransfer, 0)); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); let call = Box::new(call_transfer(6, 1)); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); @@ -352,70 +346,65 @@ fn filtering_works() { assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); let inner = Box::new(call_transfer(6, 1)); - let call = 
Box::new(RuntimeCall::Utility(UtilityCall::as_derivative { - index: 0, - call: inner.clone(), - })); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); + let call = + Box::new(Call::Utility(UtilityCall::as_derivative { index: 0, call: inner.clone() })); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { calls: vec![*inner] })); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); + let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } .into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - let inner = 
Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_add_proxy( - 5, - ProxyType::Any, - 0, - ))); - let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { calls: vec![*inner] })); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); + let inner = + Box::new(Call::Proxy(ProxyCall::new_call_variant_add_proxy(5, ProxyType::Any, 0))); + let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } .into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - let call = Box::new(RuntimeCall::Proxy(ProxyCall::remove_proxies {})); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + let call = Box::new(Call::Proxy(ProxyCall::remove_proxies {})); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, 
None, call.clone())); expect_events(vec![ BalancesEvent::::Unreserved { who: 1, amount: 5 }.into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), @@ -426,27 +415,27 @@ fn filtering_works() { #[test] fn add_remove_proxies_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); assert_noop!( - Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0), + Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), Error::::Duplicate ); assert_eq!(Balances::reserved_balance(1), 2); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 5); assert_noop!( - Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::Any, 0), + Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), Error::::TooMany ); assert_noop!( - Proxy::remove_proxy(RuntimeOrigin::signed(1), 3, ProxyType::JustTransfer, 0), + Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), Error::::NotFound ); - assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); System::assert_last_event( ProxyEvent::ProxyRemoved { delegator: 1, @@ -457,7 +446,7 @@ fn add_remove_proxies_works() { .into(), ); assert_eq!(Balances::reserved_balance(1), 4); - 
assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 3); System::assert_last_event( ProxyEvent::ProxyRemoved { @@ -468,7 +457,7 @@ fn add_remove_proxies_works() { } .into(), ); - assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 2); System::assert_last_event( ProxyEvent::ProxyRemoved { @@ -479,7 +468,7 @@ fn add_remove_proxies_works() { } .into(), ); - assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); System::assert_last_event( ProxyEvent::ProxyRemoved { @@ -491,7 +480,7 @@ fn add_remove_proxies_works() { .into(), ); assert_noop!( - Proxy::add_proxy(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0), + Proxy::add_proxy(Origin::signed(1), 1, ProxyType::Any, 0), Error::::NoSelfProxy ); }); @@ -500,10 +489,10 @@ fn add_remove_proxies_works() { #[test] fn cannot_add_proxy_without_balance() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(5), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(5), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(5), 2); assert_noop!( - Proxy::add_proxy(RuntimeOrigin::signed(5), 4, ProxyType::Any, 0), + Proxy::add_proxy(Origin::signed(5), 4, ProxyType::Any, 0), BalancesError::::InsufficientBalance ); }); @@ -512,51 +501,49 @@ fn cannot_add_proxy_without_balance() { #[test] fn proxying_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); - assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); + 
assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); let call = Box::new(call_transfer(6, 1)); assert_noop!( - Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone()), + Proxy::proxy(Origin::signed(4), 1, None, call.clone()), Error::::NotProxy ); assert_noop!( - Proxy::proxy(RuntimeOrigin::signed(2), 1, Some(ProxyType::Any), call.clone()), + Proxy::proxy(Origin::signed(2), 1, Some(ProxyType::Any), call.clone()), Error::::NotProxy ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); - let call = Box::new(RuntimeCall::System(SystemCall::set_code { code: vec![] })); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + let call = Box::new(Call::System(SystemCall::set_code { code: vec![] })); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_keep_alive { - dest: 6, - value: 1, - })); - assert_ok!(RuntimeCall::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) - .dispatch(RuntimeOrigin::signed(2))); + let call = + Box::new(Call::Balances(BalancesCall::transfer_keep_alive { dest: 6, value: 1 })); + assert_ok!(Call::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) + .dispatch(Origin::signed(2))); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); 
System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 2); }); } #[test] -fn pure_works() { +fn anonymous_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); - let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); + let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( - ProxyEvent::PureCreated { - pure: anon, + ProxyEvent::AnonymousCreated { + anonymous: anon.clone(), who: 1, proxy_type: ProxyType::Any, disambiguation_index: 0, @@ -564,46 +551,46 @@ fn pure_works() { .into(), ); - // other calls to pure allowed as long as they're not exactly the same. - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::JustTransfer, 0, 0)); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 1)); - let anon2 = Proxy::pure_account(&2, &ProxyType::Any, 0, None); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(2), ProxyType::Any, 0, 0)); + // other calls to anonymous allowed as long as they're not exactly the same. 
+ assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 1)); + let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); + assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0, 0)); assert_noop!( - Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0), + Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate ); System::set_extrinsic_index(1); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); System::set_block_number(2); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let call = Box::new(call_transfer(6, 1)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), anon, 5)); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call)); + assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); + assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call)); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); - let call = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_kill_pure( + let call = Box::new(Call::Proxy(ProxyCall::new_call_variant_kill_anonymous( 1, ProxyType::Any, 0, 1, 0, ))); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), anon2, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(2), anon2, None, call.clone())); let de = DispatchError::from(Error::::NoPermission).stripped(); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); assert_noop!( - Proxy::kill_pure(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), + Proxy::kill_anonymous(Origin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission ); 
assert_eq!(Balances::free_balance(1), 0); - assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone())); + assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call.clone())); assert_eq!(Balances::free_balance(1), 2); assert_noop!( - Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone()), + Proxy::proxy(Origin::signed(1), anon, None, call.clone()), Error::::NotProxy ); }); diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 706810d3402ec..19beaf4d1401b 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_proxy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/proxy/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/proxy/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,251 +52,203 @@ pub trait WeightInfo { fn add_proxy(p: u32, ) -> Weight; fn remove_proxy(p: u32, ) -> Weight; fn remove_proxies(p: u32, ) -> Weight; - fn create_pure(p: u32, ) -> Weight; - fn kill_pure(p: u32, ) -> Weight; + fn anonymous(p: u32, ) -> Weight; + fn kill_anonymous(p: u32, ) -> Weight; } /// Weights for pallet_proxy using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Proxy Proxies (r:1 w:0) - /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { - // Minimum execution time: 24_285 nanoseconds. - Weight::from_ref_time(25_355_667 as u64) - // Standard Error: 1_468 - .saturating_add(Weight::from_ref_time(38_185 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (17_768_000 as Weight) + // Standard Error: 2_000 + .saturating_add((76_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 44_948 nanoseconds. 
- Weight::from_ref_time(44_762_064 as u64) - // Standard Error: 1_778 - .saturating_add(Weight::from_ref_time(118_940 as u64).saturating_mul(a as u64)) - // Standard Error: 1_837 - .saturating_add(Weight::from_ref_time(51_232 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (35_682_000 as Weight) + // Standard Error: 2_000 + .saturating_add((158_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((73_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 31_274 nanoseconds. - Weight::from_ref_time(32_219_165 as u64) - // Standard Error: 1_832 - .saturating_add(Weight::from_ref_time(132_454 as u64).saturating_mul(a as u64)) - // Standard Error: 1_893 - .saturating_add(Weight::from_ref_time(9_077 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (25_586_000 as Weight) + // Standard Error: 1_000 + .saturating_add((175_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((18_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 31_219 nanoseconds. 
- Weight::from_ref_time(32_439_563 as u64) - // Standard Error: 1_829 - .saturating_add(Weight::from_ref_time(120_251 as u64).saturating_mul(a as u64)) - // Standard Error: 1_890 - .saturating_add(Weight::from_ref_time(8_689 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (25_794_000 as Weight) + // Standard Error: 1_000 + .saturating_add((173_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 40_388 nanoseconds. - Weight::from_ref_time(40_718_245 as u64) - // Standard Error: 1_821 - .saturating_add(Weight::from_ref_time(129_674 as u64).saturating_mul(a as u64)) - // Standard Error: 1_882 - .saturating_add(Weight::from_ref_time(56_001 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (33_002_000 as Weight) + // Standard Error: 2_000 + .saturating_add((163_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((79_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { - // Minimum execution time: 33_997 nanoseconds. 
- Weight::from_ref_time(34_840_036 as u64) - // Standard Error: 1_659 - .saturating_add(Weight::from_ref_time(71_349 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (28_166_000 as Weight) + // Standard Error: 2_000 + .saturating_add((105_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { - // Minimum execution time: 33_900 nanoseconds. - Weight::from_ref_time(35_069_110 as u64) - // Standard Error: 1_848 - .saturating_add(Weight::from_ref_time(82_380 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (28_128_000 as Weight) + // Standard Error: 3_000 + .saturating_add((118_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { - // Minimum execution time: 29_627 nanoseconds. - Weight::from_ref_time(30_641_642 as u64) - // Standard Error: 1_495 - .saturating_add(Weight::from_ref_time(51_919 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (24_066_000 as Weight) + // Standard Error: 2_000 + .saturating_add((81_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. 
- fn create_pure(p: u32, ) -> Weight { - // Minimum execution time: 37_761 nanoseconds. - Weight::from_ref_time(38_748_697 as u64) - // Standard Error: 1_594 - .saturating_add(Weight::from_ref_time(19_022 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn anonymous(p: u32, ) -> Weight { + (31_077_000 as Weight) + // Standard Error: 3_000 + .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Minimum execution time: 31_145 nanoseconds. - Weight::from_ref_time(31_933_568 as u64) - // Standard Error: 1_492 - .saturating_add(Weight::from_ref_time(50_250 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn kill_anonymous(p: u32, ) -> Weight { + (24_657_000 as Weight) + // Standard Error: 2_000 + .saturating_add((87_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { // Storage: Proxy Proxies (r:1 w:0) - /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { - // Minimum execution time: 24_285 nanoseconds. 
- Weight::from_ref_time(25_355_667 as u64) - // Standard Error: 1_468 - .saturating_add(Weight::from_ref_time(38_185 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (17_768_000 as Weight) + // Standard Error: 2_000 + .saturating_add((76_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 44_948 nanoseconds. - Weight::from_ref_time(44_762_064 as u64) - // Standard Error: 1_778 - .saturating_add(Weight::from_ref_time(118_940 as u64).saturating_mul(a as u64)) - // Standard Error: 1_837 - .saturating_add(Weight::from_ref_time(51_232 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (35_682_000 as Weight) + // Standard Error: 2_000 + .saturating_add((158_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((73_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 31_274 nanoseconds. 
- Weight::from_ref_time(32_219_165 as u64) - // Standard Error: 1_832 - .saturating_add(Weight::from_ref_time(132_454 as u64).saturating_mul(a as u64)) - // Standard Error: 1_893 - .saturating_add(Weight::from_ref_time(9_077 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (25_586_000 as Weight) + // Standard Error: 1_000 + .saturating_add((175_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((18_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 31_219 nanoseconds. - Weight::from_ref_time(32_439_563 as u64) - // Standard Error: 1_829 - .saturating_add(Weight::from_ref_time(120_251 as u64).saturating_mul(a as u64)) - // Standard Error: 1_890 - .saturating_add(Weight::from_ref_time(8_689 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (25_794_000 as Weight) + // Standard Error: 1_000 + .saturating_add((173_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { - // Minimum execution time: 40_388 nanoseconds. 
- Weight::from_ref_time(40_718_245 as u64) - // Standard Error: 1_821 - .saturating_add(Weight::from_ref_time(129_674 as u64).saturating_mul(a as u64)) - // Standard Error: 1_882 - .saturating_add(Weight::from_ref_time(56_001 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (33_002_000 as Weight) + // Standard Error: 2_000 + .saturating_add((163_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((79_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { - // Minimum execution time: 33_997 nanoseconds. - Weight::from_ref_time(34_840_036 as u64) - // Standard Error: 1_659 - .saturating_add(Weight::from_ref_time(71_349 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (28_166_000 as Weight) + // Standard Error: 2_000 + .saturating_add((105_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { - // Minimum execution time: 33_900 nanoseconds. 
- Weight::from_ref_time(35_069_110 as u64) - // Standard Error: 1_848 - .saturating_add(Weight::from_ref_time(82_380 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (28_128_000 as Weight) + // Standard Error: 3_000 + .saturating_add((118_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { - // Minimum execution time: 29_627 nanoseconds. - Weight::from_ref_time(30_641_642 as u64) - // Standard Error: 1_495 - .saturating_add(Weight::from_ref_time(51_919 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (24_066_000 as Weight) + // Standard Error: 2_000 + .saturating_add((81_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { - // Minimum execution time: 37_761 nanoseconds. 
- Weight::from_ref_time(38_748_697 as u64) - // Standard Error: 1_594 - .saturating_add(Weight::from_ref_time(19_022 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn anonymous(p: u32, ) -> Weight { + (31_077_000 as Weight) + // Standard Error: 3_000 + .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Proxy Proxies (r:1 w:1) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Minimum execution time: 31_145 nanoseconds. - Weight::from_ref_time(31_933_568 as u64) - // Standard Error: 1_492 - .saturating_add(Weight::from_ref_time(50_250 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn kill_anonymous(p: u32, ) -> Weight { + (24_657_000 as Weight) + // Standard Error: 2_000 + .saturating_add((87_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index be970ba2a8422..f709578f6941a 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -69,7 +69,7 @@ use safe_mix::TripletMix; use codec::Encode; -use frame_support::{pallet_prelude::Weight, traits::Randomness}; +use frame_support::traits::Randomness; use sp_runtime::traits::{Hash, Saturating}; const RANDOM_MATERIAL_LEN: u32 = 81; @@ -187,7 +187,7 @@ mod tests { parameter_types! 
{ pub BlockWeights: limits::BlockWeights = limits::BlockWeights - ::simple_max(Weight::from_ref_time(1024)); + ::simple_max(1024); pub BlockLength: limits::BlockLength = limits::BlockLength ::max(2 * 1024); } @@ -197,16 +197,16 @@ mod tests { type BlockWeights = (); type BlockLength = BlockLength; type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/ranked-collective/Cargo.toml b/frame/ranked-collective/Cargo.toml index c8cf671a97467..cb43b9ea4c831 100644 --- a/frame/ranked-collective/Cargo.toml +++ b/frame/ranked-collective/Cargo.toml @@ -29,7 +29,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/ranked-collective/src/benchmarking.rs b/frame/ranked-collective/src/benchmarking.rs index eb629b330abb2..ab1a5dc283ca5 100644 --- a/frame/ranked-collective/src/benchmarking.rs +++ b/frame/ranked-collective/src/benchmarking.rs @@ -27,21 +27,17 @@ use frame_system::RawOrigin as SystemOrigin; const SEED: u32 = 0; -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } fn make_member, I: 'static>(rank: Rank) -> T::AccountId { let who = account::("member", MemberCount::::get(0), SEED); - let who_lookup = T::Lookup::unlookup(who.clone()); - assert_ok!(Pallet::::add_member( - T::PromoteOrigin::successful_origin(), - who_lookup.clone() - )); 
+ assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), who.clone())); for _ in 0..rank { assert_ok!(Pallet::::promote_member( T::PromoteOrigin::successful_origin(), - who_lookup.clone() + who.clone() )); } who @@ -50,9 +46,8 @@ fn make_member, I: 'static>(rank: Rank) -> T::AccountId { benchmarks_instance_pallet! { add_member { let who = account::("member", 0, SEED); - let who_lookup = T::Lookup::unlookup(who.clone()); let origin = T::PromoteOrigin::successful_origin(); - let call = Call::::add_member { who: who_lookup }; + let call = Call::::add_member { who: who.clone() }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(MemberCount::::get(0), 1); @@ -64,11 +59,10 @@ benchmarks_instance_pallet! { let rank = r as u16; let first = make_member::(rank); let who = make_member::(rank); - let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); let last_index = (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect::>(); let origin = T::DemoteOrigin::successful_origin(); - let call = Call::::remove_member { who: who_lookup, min_rank: rank }; + let call = Call::::remove_member { who: who.clone(), min_rank: rank }; }: { call.dispatch_bypass_filter(origin)? } verify { for r in 0..=rank { @@ -82,9 +76,8 @@ benchmarks_instance_pallet! { let r in 0 .. 10; let rank = r as u16; let who = make_member::(rank); - let who_lookup = T::Lookup::unlookup(who.clone()); let origin = T::PromoteOrigin::successful_origin(); - let call = Call::::promote_member { who: who_lookup }; + let call = Call::::promote_member { who: who.clone() }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(Members::::get(&who).unwrap().rank, rank + 1); @@ -96,11 +89,10 @@ benchmarks_instance_pallet! 
{ let rank = r as u16; let first = make_member::(rank); let who = make_member::(rank); - let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); let last_index = IdToIndex::::get(rank, &last).unwrap(); let origin = T::DemoteOrigin::successful_origin(); - let call = Call::::demote_member { who: who_lookup }; + let call = Call::::demote_member { who: who.clone() }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(Members::::get(&who).map(|x| x.rank), rank.checked_sub(1)); @@ -114,15 +106,14 @@ benchmarks_instance_pallet! { vote { let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); - assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), caller_lookup.clone())); + assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), caller.clone())); // Create a poll let class = T::Polls::classes().into_iter().next().unwrap(); let rank = T::MinRankOfClass::convert(class.clone()); for _ in 0..rank { assert_ok!(Pallet::::promote_member( T::PromoteOrigin::successful_origin(), - caller_lookup.clone() + caller.clone() )); } @@ -138,7 +129,7 @@ benchmarks_instance_pallet! { } cleanup_poll { - let n in 0 .. 100; + let n in 1 .. 100; // Create a poll let class = T::Polls::classes().into_iter().next().unwrap(); diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 33aed2704918c..7ea43a9017445 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -21,7 +21,7 @@ //! systems such as the Referenda pallet. Members each have a rank, with zero being the lowest. //! There is no complexity limitation on either the number of members at a rank or the number of //! ranks in the system thus allowing potentially public membership. A member of at least a given -//! rank can be selected at random in O(1) time, allowing for various games to be constructed using +//! 
rank can be selected at random in O(1) time, allowing for various games to constructed using //! this as a primitive. Members may only be promoted and demoted by one rank at a time, however //! all operations (save one) are O(1) in complexity. The only operation which is not O(1) is the //! `remove_member` since they must be removed from all ranks from the present down to zero. @@ -33,7 +33,7 @@ //! //! Two `Config` trait items control these "rank privileges": `MinRankOfClass` and `VoteWeight`. //! The first controls which ranks are allowed to vote on a particular class of poll. The second -//! controls the weight of a vote given the voter's rank compared to the minimum rank of the poll. +//! controls the weight of a vote given the voters rank compared to the minimum rank of the poll. //! //! An origin control, `EnsureRank`, ensures that the origin is a member of the collective of at //! least a particular rank. @@ -43,18 +43,15 @@ use scale_info::TypeInfo; use sp_arithmetic::traits::Saturating; -use sp_runtime::{ - traits::{Convert, StaticLookup}, - ArithmeticError::Overflow, - Perbill, RuntimeDebug, -}; +use sp_runtime::{traits::Convert, ArithmeticError::Overflow, Perbill, RuntimeDebug}; use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ codec::{Decode, Encode, MaxEncodedLen}, - dispatch::{DispatchError, DispatchResultWithPostInfo, PostDispatchInfo}, + dispatch::{DispatchError, DispatchResultWithPostInfo}, ensure, traits::{EnsureOrigin, PollStatus, Polling, VoteTally}, + weights::PostDispatchInfo, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; @@ -88,16 +85,16 @@ pub type Votes = u32; Decode, MaxEncodedLen, )] -#[scale_info(skip_type_params(T, I, M))] +#[scale_info(skip_type_params(M))] #[codec(mel_bound())] -pub struct Tally { +pub struct Tally { bare_ayes: MemberIndex, ayes: Votes, nays: Votes, - dummy: PhantomData<(T, I, M)>, + dummy: PhantomData, } -impl, I: 'static, M: GetMaxVoters> Tally { +impl Tally { pub fn 
from_parts(bare_ayes: MemberIndex, ayes: Votes, nays: Votes) -> Self { Tally { bare_ayes, ayes, nays, dummy: PhantomData } } @@ -110,11 +107,10 @@ impl, I: 'static, M: GetMaxVoters> Tally { // All functions of VoteTally now include the class as a param. -pub type TallyOf = Tally>; +pub type TallyOf = Tally>; pub type PollIndexOf = <>::Polls as Polling>>::Index; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -impl, I: 'static, M: GetMaxVoters> VoteTally for Tally { +impl VoteTally for Tally { fn new(_: Rank) -> Self { Self { bare_ayes: 0, ayes: 0, nays: 0, dummy: PhantomData } } @@ -147,20 +143,6 @@ impl, I: 'static, M: GetMaxVoters> VoteTally for Tally let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { bare_ayes: ayes, ayes, nays, dummy: PhantomData } } - - #[cfg(feature = "runtime-benchmarks")] - fn setup(class: Rank, granularity: Perbill) { - if M::get_max_voters(class) == 0 { - let max_voters = granularity.saturating_reciprocal_mul(1u32); - for i in 0..max_voters { - let who: T::AccountId = - frame_benchmarking::account("ranked_collective_benchmarking", i, 0); - crate::Pallet::::do_add_member_to_rank(who, class) - .expect("could not add members for benchmarks"); - } - assert_eq!(M::get_max_voters(class), max_voters); - } - } } /// Record needed for every member. @@ -199,11 +181,11 @@ impl Convert for Unit { /// Vote-weight scheme where all voters get one vote plus an additional vote for every excess rank /// they have. I.e.: /// -/// - Each member with an excess rank of 0 gets 1 vote; +/// - Each member with no excess rank gets 1 vote; /// - ...with an excess rank of 1 gets 2 votes; -/// - ...with an excess rank of 2 gets 3 votes; -/// - ...with an excess rank of 3 gets 4 votes; -/// - ...with an excess rank of 4 gets 5 votes. +/// - ...with an excess rank of 2 gets 2 votes; +/// - ...with an excess rank of 3 gets 3 votes; +/// - ...with an excess rank of 4 gets 4 votes. 
pub struct Linear; impl Convert for Linear { fn convert(r: Rank) -> Votes { @@ -214,11 +196,11 @@ impl Convert for Linear { /// Vote-weight scheme where all voters get one vote plus additional votes for every excess rank /// they have incrementing by one vote for each excess rank. I.e.: /// -/// - Each member with an excess rank of 0 gets 1 vote; -/// - ...with an excess rank of 1 gets 3 votes; -/// - ...with an excess rank of 2 gets 6 votes; -/// - ...with an excess rank of 3 gets 10 votes; -/// - ...with an excess rank of 4 gets 15 votes. +/// - Each member with no excess rank gets 1 vote; +/// - ...with an excess rank of 1 gets 2 votes; +/// - ...with an excess rank of 2 gets 3 votes; +/// - ...with an excess rank of 3 gets 6 votes; +/// - ...with an excess rank of 4 gets 10 votes. pub struct Geometric; impl Convert for Geometric { fn convert(r: Rank) -> Votes { @@ -241,12 +223,12 @@ impl, I: 'static> GetMaxVoters for Pallet { /// Guard to ensure that the given origin is a member of the collective. The rank of the member is /// the `Success` value. pub struct EnsureRanked(PhantomData<(T, I)>); -impl, I: 'static, const MIN_RANK: u16> EnsureOrigin +impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureRanked { type Success = Rank; - fn try_origin(o: T::RuntimeOrigin) -> Result { + fn try_origin(o: T::Origin) -> Result { let who = frame_system::EnsureSigned::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. 
}) if rank >= MIN_RANK => Ok(rank), @@ -255,34 +237,21 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { + fn try_successful_origin() -> Result { let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::RuntimeOrigin { - match Self::try_successful_origin() { - Ok(o) => o, - Err(()) => { - let who: T::AccountId = frame_benchmarking::whitelisted_caller(); - crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) - .expect("failed to add ranked member"); - frame_system::RawOrigin::Signed(who).into() - }, - } - } } /// Guard to ensure that the given origin is a member of the collective. The account ID of the /// member is the `Success` value. pub struct EnsureMember(PhantomData<(T, I)>); -impl, I: 'static, const MIN_RANK: u16> EnsureOrigin +impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureMember { type Success = T::AccountId; - fn try_origin(o: T::RuntimeOrigin) -> Result { + fn try_origin(o: T::Origin) -> Result { let who = frame_system::EnsureSigned::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok(who), @@ -291,34 +260,21 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { + fn try_successful_origin() -> Result { let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::RuntimeOrigin { - match Self::try_successful_origin() { - Ok(o) => o, - Err(()) => { - let who: T::AccountId = frame_benchmarking::whitelisted_caller(); - crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) - .expect("failed to add ranked member"); - frame_system::RawOrigin::Signed(who).into() - }, - } - } } -/// Guard to ensure that the given origin is a member of the collective. 
The pair of both the -/// account ID and the rank of the member is the `Success` value. +/// Guard to ensure that the given origin is a member of the collective. The pair of including both +/// the account ID and the rank of the member is the `Success` value. pub struct EnsureRankedMember(PhantomData<(T, I)>); -impl, I: 'static, const MIN_RANK: u16> EnsureOrigin +impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureRankedMember { type Success = (T::AccountId, Rank); - fn try_origin(o: T::RuntimeOrigin) -> Result { + fn try_origin(o: T::Origin) -> Result { let who = frame_system::EnsureSigned::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok((who, rank)), @@ -327,23 +283,10 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin Result { + fn try_successful_origin() -> Result { let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::RuntimeOrigin { - match Self::try_successful_origin() { - Ok(o) => o, - Err(()) => { - let who: T::AccountId = frame_benchmarking::whitelisted_caller(); - crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) - .expect("failed to add ranked member"); - frame_system::RawOrigin::Signed(who).into() - }, - } - } } #[frame_support::pallet] @@ -361,17 +304,16 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - /// The runtime event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + /// The outer event type. + type Event: From> + IsType<::Event>; /// The origin required to add or promote a mmember. The success value indicates the /// maximum rank *to which* the promotion may be. - type PromoteOrigin: EnsureOrigin; + type PromoteOrigin: EnsureOrigin; /// The origin required to demote or remove a member. The success value indicates the /// maximum rank *from which* the demotion/removal may be. 
- type DemoteOrigin: EnsureOrigin; + type DemoteOrigin: EnsureOrigin; /// The polling system used for our voting. type Polls: Polling, Votes = Votes, Moment = Self::BlockNumber>; @@ -430,7 +372,7 @@ pub mod pallet { pub enum Event, I: 'static = ()> { /// A member `who` has been added. MemberAdded { who: T::AccountId }, - /// The member `who`se rank has been changed to the given `rank`. + /// The member `who`'s rank has been changed to the given `rank`. RankChanged { who: T::AccountId, rank: Rank }, /// The member `who` of given `rank` has been removed from the collective. MemberRemoved { who: T::AccountId, rank: Rank }, @@ -471,10 +413,19 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::add_member())] - pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let _ = T::PromoteOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; - Self::do_add_member(who) + ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); + let index = MemberCount::::get(0); + let count = index.checked_add(1).ok_or(Overflow)?; + + Members::::insert(&who, MemberRecord { rank: 0 }); + IdToIndex::::insert(0, &who, index); + IndexToId::::insert(0, index, &who); + MemberCount::::insert(0, count); + Self::deposit_event(Event::MemberAdded { who }); + + Ok(()) } /// Increment the rank of an existing member by one. 
@@ -484,10 +435,19 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::promote_member(0))] - pub fn promote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn promote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let max_rank = T::PromoteOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; - Self::do_promote_member(who, Some(max_rank)) + let record = Self::ensure_member(&who)?; + let rank = record.rank.checked_add(1).ok_or(Overflow)?; + ensure!(max_rank >= rank, Error::::NoPermission); + let index = MemberCount::::get(rank); + MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); + IdToIndex::::insert(rank, &who, index); + IndexToId::::insert(rank, index, &who); + Members::::insert(&who, MemberRecord { rank }); + Self::deposit_event(Event::RankChanged { who, rank }); + + Ok(()) } /// Decrement the rank of an existing member by one. If the member is already at rank zero, @@ -498,9 +458,8 @@ pub mod pallet { /// /// Weight: `O(1)`, less if the member's index is highest in its rank. #[pallet::weight(T::WeightInfo::demote_member(0))] - pub fn demote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + pub fn demote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; let mut record = Self::ensure_member(&who)?; let rank = record.rank; ensure!(max_rank >= rank, Error::::NoPermission); @@ -531,11 +490,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_member(*min_rank as u32))] pub fn remove_member( origin: OriginFor, - who: AccountIdLookupOf, + who: T::AccountId, min_rank: Rank, ) -> DispatchResultWithPostInfo { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; let MemberRecord { rank, .. 
} = Self::ensure_member(&who)?; ensure!(min_rank >= rank, Error::::InvalidWitness); ensure!(max_rank >= rank, Error::::NoPermission); @@ -668,53 +626,5 @@ pub mod pallet { MemberCount::::mutate(rank, |r| r.saturating_dec()); Ok(()) } - - /// Adds a member into the ranked collective at level 0. - /// - /// No origin checks are executed. - pub fn do_add_member(who: T::AccountId) -> DispatchResult { - ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); - let index = MemberCount::::get(0); - let count = index.checked_add(1).ok_or(Overflow)?; - - Members::::insert(&who, MemberRecord { rank: 0 }); - IdToIndex::::insert(0, &who, index); - IndexToId::::insert(0, index, &who); - MemberCount::::insert(0, count); - Self::deposit_event(Event::MemberAdded { who }); - Ok(()) - } - - /// Promotes a member in the ranked collective into the next role. - /// - /// A `maybe_max_rank` may be provided to check that the member does not get promoted beyond - /// a certain rank. Is `None` is provided, then the rank will be incremented without checks. - pub fn do_promote_member( - who: T::AccountId, - maybe_max_rank: Option, - ) -> DispatchResult { - let record = Self::ensure_member(&who)?; - let rank = record.rank.checked_add(1).ok_or(Overflow)?; - if let Some(max_rank) = maybe_max_rank { - ensure!(max_rank >= rank, Error::::NoPermission); - } - let index = MemberCount::::get(rank); - MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); - IdToIndex::::insert(rank, &who, index); - IndexToId::::insert(rank, index, &who); - Members::::insert(&who, MemberRecord { rank }); - Self::deposit_event(Event::RankChanged { who, rank }); - Ok(()) - } - - /// Add a member to the rank collective, and continue to promote them until a certain rank - /// is reached. 
- pub fn do_add_member_to_rank(who: T::AccountId, rank: Rank) -> DispatchResult { - Self::do_add_member(who.clone())?; - for _ in 0..rank { - Self::do_promote_member(who.clone(), None)?; - } - Ok(()) - } } } diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 68bb79f3d07f7..4344a1be730fb 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -22,7 +22,6 @@ use std::collections::BTreeMap; use frame_support::{ assert_noop, assert_ok, error::BadOrigin, - pallet_prelude::Weight, parameter_types, traits::{ConstU16, ConstU32, ConstU64, EitherOf, Everything, MapSuccess, Polling}, }; @@ -51,23 +50,23 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); + frame_system::limits::BlockWeights::simple_max(1_000_000); } impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -171,7 +170,7 @@ impl Polling> for TestPolls { impl Config for Test { type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PromoteOrigin = EitherOf< // Root can promote arbitrarily. 
frame_system::EnsureRootWithSuccess>, @@ -239,10 +238,10 @@ fn basic_stuff() { #[test] fn member_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(Origin::root(), 1)); + assert_ok!(Club::promote_member(Origin::root(), 1)); + assert_ok!(Club::demote_member(Origin::root(), 1)); + assert_ok!(Club::demote_member(Origin::root(), 1)); assert_eq!(member_count(0), 0); assert_eq!(member_count(1), 0); }); @@ -251,29 +250,29 @@ fn member_lifecycle_works() { #[test] fn add_remove_works() { new_test_ext().execute_with(|| { - assert_noop!(Club::add_member(RuntimeOrigin::signed(1), 1), DispatchError::BadOrigin); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_noop!(Club::add_member(Origin::signed(1), 1), DispatchError::BadOrigin); + assert_ok!(Club::add_member(Origin::root(), 1)); assert_eq!(member_count(0), 1); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::demote_member(Origin::root(), 1)); assert_eq!(member_count(0), 0); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(Origin::root(), 1)); assert_eq!(member_count(0), 1); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(Origin::root(), 2)); assert_eq!(member_count(0), 2); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::add_member(Origin::root(), 3)); assert_eq!(member_count(0), 3); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::demote_member(Origin::root(), 3)); assert_eq!(member_count(0), 2); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::demote_member(Origin::root(), 1)); assert_eq!(member_count(0), 1); - 
assert_ok!(Club::demote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::demote_member(Origin::root(), 2)); assert_eq!(member_count(0), 0); }); } @@ -281,29 +280,29 @@ fn add_remove_works() { #[test] fn promote_demote_works() { new_test_ext().execute_with(|| { - assert_noop!(Club::add_member(RuntimeOrigin::signed(1), 1), DispatchError::BadOrigin); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_noop!(Club::add_member(Origin::signed(1), 1), DispatchError::BadOrigin); + assert_ok!(Club::add_member(Origin::root(), 1)); assert_eq!(member_count(0), 1); assert_eq!(member_count(1), 0); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(Origin::root(), 2)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 0); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(Origin::root(), 1)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 1); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(Origin::root(), 2)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 2); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::demote_member(Origin::root(), 1)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 1); - assert_noop!(Club::demote_member(RuntimeOrigin::signed(1), 1), DispatchError::BadOrigin); - assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_noop!(Club::demote_member(Origin::signed(1), 1), DispatchError::BadOrigin); + assert_ok!(Club::demote_member(Origin::root(), 1)); assert_eq!(member_count(0), 1); assert_eq!(member_count(1), 1); }); @@ -312,100 +311,88 @@ fn promote_demote_works() { #[test] fn promote_demote_by_rank_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(Origin::root(), 1)); for _ in 0..7 { - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); 
+ assert_ok!(Club::promote_member(Origin::root(), 1)); } // #1 can add #2 and promote to rank 1 - assert_ok!(Club::add_member(RuntimeOrigin::signed(1), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); + assert_ok!(Club::add_member(Origin::signed(1), 2)); + assert_ok!(Club::promote_member(Origin::signed(1), 2)); // #2 as rank 1 cannot do anything privileged - assert_noop!(Club::add_member(RuntimeOrigin::signed(2), 3), BadOrigin); + assert_noop!(Club::add_member(Origin::signed(2), 3), BadOrigin); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); + assert_ok!(Club::promote_member(Origin::signed(1), 2)); // #2 as rank 2 can add #3. - assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 3)); + assert_ok!(Club::add_member(Origin::signed(2), 3)); // #2 as rank 2 cannot promote #3 to rank 1 - assert_noop!( - Club::promote_member(RuntimeOrigin::signed(2), 3), - Error::::NoPermission - ); + assert_noop!(Club::promote_member(Origin::signed(2), 3), Error::::NoPermission); // #1 as rank 7 can promote #2 only up to rank 5 and once there cannot demote them. - assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); - assert_noop!( - Club::promote_member(RuntimeOrigin::signed(1), 2), - Error::::NoPermission - ); - assert_noop!(Club::demote_member(RuntimeOrigin::signed(1), 2), Error::::NoPermission); + assert_ok!(Club::promote_member(Origin::signed(1), 2)); + assert_ok!(Club::promote_member(Origin::signed(1), 2)); + assert_ok!(Club::promote_member(Origin::signed(1), 2)); + assert_noop!(Club::promote_member(Origin::signed(1), 2), Error::::NoPermission); + assert_noop!(Club::demote_member(Origin::signed(1), 2), Error::::NoPermission); // #2 as rank 5 can promote #3 only up to rank 3 and once there cannot demote them. 
- assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 3)); - assert_noop!( - Club::promote_member(RuntimeOrigin::signed(2), 3), - Error::::NoPermission - ); - assert_noop!(Club::demote_member(RuntimeOrigin::signed(2), 3), Error::::NoPermission); + assert_ok!(Club::promote_member(Origin::signed(2), 3)); + assert_ok!(Club::promote_member(Origin::signed(2), 3)); + assert_ok!(Club::promote_member(Origin::signed(2), 3)); + assert_noop!(Club::promote_member(Origin::signed(2), 3), Error::::NoPermission); + assert_noop!(Club::demote_member(Origin::signed(2), 3), Error::::NoPermission); // #2 can add #4 & #5 as rank 0 and #6 & #7 as rank 1. - assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 4)); - assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 5)); - assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 6)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 6)); - assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 7)); - assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 7)); + assert_ok!(Club::add_member(Origin::signed(2), 4)); + assert_ok!(Club::add_member(Origin::signed(2), 5)); + assert_ok!(Club::add_member(Origin::signed(2), 6)); + assert_ok!(Club::promote_member(Origin::signed(2), 6)); + assert_ok!(Club::add_member(Origin::signed(2), 7)); + assert_ok!(Club::promote_member(Origin::signed(2), 7)); // #3 as rank 3 can demote/remove #4 & #5 but not #6 & #7 - assert_ok!(Club::demote_member(RuntimeOrigin::signed(3), 4)); - assert_ok!(Club::remove_member(RuntimeOrigin::signed(3), 5, 0)); - assert_noop!(Club::demote_member(RuntimeOrigin::signed(3), 6), Error::::NoPermission); - assert_noop!( - Club::remove_member(RuntimeOrigin::signed(3), 7, 1), - Error::::NoPermission - ); + assert_ok!(Club::demote_member(Origin::signed(3), 4)); + assert_ok!(Club::remove_member(Origin::signed(3), 5, 0)); + 
assert_noop!(Club::demote_member(Origin::signed(3), 6), Error::::NoPermission); + assert_noop!(Club::remove_member(Origin::signed(3), 7, 1), Error::::NoPermission); // #2 as rank 5 can demote/remove #6 & #7 - assert_ok!(Club::demote_member(RuntimeOrigin::signed(2), 6)); - assert_ok!(Club::remove_member(RuntimeOrigin::signed(2), 7, 1)); + assert_ok!(Club::demote_member(Origin::signed(2), 6)); + assert_ok!(Club::remove_member(Origin::signed(2), 7, 1)); }); } #[test] fn voting_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(RuntimeOrigin::root(), 0)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); - - assert_noop!(Club::vote(RuntimeOrigin::signed(0), 3, true), Error::::RankTooLow); + assert_ok!(Club::add_member(Origin::root(), 0)); + assert_ok!(Club::add_member(Origin::root(), 1)); + assert_ok!(Club::promote_member(Origin::root(), 1)); + assert_ok!(Club::add_member(Origin::root(), 2)); + assert_ok!(Club::promote_member(Origin::root(), 2)); + assert_ok!(Club::promote_member(Origin::root(), 2)); + assert_ok!(Club::add_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); + + assert_noop!(Club::vote(Origin::signed(0), 3, true), Error::::RankTooLow); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); - assert_ok!(Club::vote(RuntimeOrigin::signed(1), 3, true)); + assert_ok!(Club::vote(Origin::signed(1), 3, true)); 
assert_eq!(tally(3), Tally::from_parts(1, 1, 0)); - assert_ok!(Club::vote(RuntimeOrigin::signed(1), 3, false)); + assert_ok!(Club::vote(Origin::signed(1), 3, false)); assert_eq!(tally(3), Tally::from_parts(0, 0, 1)); - assert_ok!(Club::vote(RuntimeOrigin::signed(2), 3, true)); + assert_ok!(Club::vote(Origin::signed(2), 3, true)); assert_eq!(tally(3), Tally::from_parts(1, 3, 1)); - assert_ok!(Club::vote(RuntimeOrigin::signed(2), 3, false)); + assert_ok!(Club::vote(Origin::signed(2), 3, false)); assert_eq!(tally(3), Tally::from_parts(0, 0, 4)); - assert_ok!(Club::vote(RuntimeOrigin::signed(3), 3, true)); + assert_ok!(Club::vote(Origin::signed(3), 3, true)); assert_eq!(tally(3), Tally::from_parts(1, 6, 4)); - assert_ok!(Club::vote(RuntimeOrigin::signed(3), 3, false)); + assert_ok!(Club::vote(Origin::signed(3), 3, false)); assert_eq!(tally(3), Tally::from_parts(0, 0, 10)); }); } @@ -413,94 +400,58 @@ fn voting_works() { #[test] fn cleanup_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); - - assert_ok!(Club::vote(RuntimeOrigin::signed(1), 3, true)); - assert_ok!(Club::vote(RuntimeOrigin::signed(2), 3, false)); - assert_ok!(Club::vote(RuntimeOrigin::signed(3), 3, true)); - - assert_noop!(Club::cleanup_poll(RuntimeOrigin::signed(4), 3, 10), Error::::Ongoing); + assert_ok!(Club::add_member(Origin::root(), 1)); + assert_ok!(Club::promote_member(Origin::root(), 1)); + assert_ok!(Club::add_member(Origin::root(), 2)); + assert_ok!(Club::promote_member(Origin::root(), 2)); + assert_ok!(Club::add_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); + + assert_ok!(Club::vote(Origin::signed(1), 3, 
true)); + assert_ok!(Club::vote(Origin::signed(2), 3, false)); + assert_ok!(Club::vote(Origin::signed(3), 3, true)); + + assert_noop!(Club::cleanup_poll(Origin::signed(4), 3, 10), Error::::Ongoing); Polls::set( vec![(1, Completed(1, true)), (2, Completed(2, false)), (3, Completed(3, true))] .into_iter() .collect(), ); - assert_ok!(Club::cleanup_poll(RuntimeOrigin::signed(4), 3, 10)); + assert_ok!(Club::cleanup_poll(Origin::signed(4), 3, 10)); // NOTE: This will fail until #10016 is merged. - // assert_noop!(Club::cleanup_poll(RuntimeOrigin::signed(4), 3, 10), - // Error::::NoneRemaining); + // assert_noop!(Club::cleanup_poll(Origin::signed(4), 3, 10), Error::::NoneRemaining); }); } #[test] fn ensure_ranked_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); - assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); - assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::add_member(Origin::root(), 1)); + assert_ok!(Club::promote_member(Origin::root(), 1)); + assert_ok!(Club::add_member(Origin::root(), 2)); + assert_ok!(Club::promote_member(Origin::root(), 2)); + assert_ok!(Club::promote_member(Origin::root(), 2)); + assert_ok!(Club::add_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); + assert_ok!(Club::promote_member(Origin::root(), 3)); use frame_support::traits::OriginTrait; type Rank1 = EnsureRanked; type Rank2 = EnsureRanked; type Rank3 = EnsureRanked; type Rank4 = EnsureRanked; - 
assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(1)).unwrap(), 1); - assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); - assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); - assert_eq!( - Rank2::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), - 1 - ); - assert_eq!(Rank2::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); - assert_eq!(Rank2::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); - assert_eq!( - Rank3::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), - 1 - ); - assert_eq!( - Rank3::try_origin(RuntimeOrigin::signed(2)).unwrap_err().as_signed().unwrap(), - 2 - ); - assert_eq!(Rank3::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); - assert_eq!( - Rank4::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), - 1 - ); - assert_eq!( - Rank4::try_origin(RuntimeOrigin::signed(2)).unwrap_err().as_signed().unwrap(), - 2 - ); - assert_eq!( - Rank4::try_origin(RuntimeOrigin::signed(3)).unwrap_err().as_signed().unwrap(), - 3 - ); + assert_eq!(Rank1::try_origin(Origin::signed(1)).unwrap(), 1); + assert_eq!(Rank1::try_origin(Origin::signed(2)).unwrap(), 2); + assert_eq!(Rank1::try_origin(Origin::signed(3)).unwrap(), 3); + assert_eq!(Rank2::try_origin(Origin::signed(1)).unwrap_err().as_signed().unwrap(), 1); + assert_eq!(Rank2::try_origin(Origin::signed(2)).unwrap(), 2); + assert_eq!(Rank2::try_origin(Origin::signed(3)).unwrap(), 3); + assert_eq!(Rank3::try_origin(Origin::signed(1)).unwrap_err().as_signed().unwrap(), 1); + assert_eq!(Rank3::try_origin(Origin::signed(2)).unwrap_err().as_signed().unwrap(), 2); + assert_eq!(Rank3::try_origin(Origin::signed(3)).unwrap(), 3); + assert_eq!(Rank4::try_origin(Origin::signed(1)).unwrap_err().as_signed().unwrap(), 1); + assert_eq!(Rank4::try_origin(Origin::signed(2)).unwrap_err().as_signed().unwrap(), 2); + assert_eq!(Rank4::try_origin(Origin::signed(3)).unwrap_err().as_signed().unwrap(), 3); }); } - -#[test] -fn 
do_add_member_to_rank_works() { - new_test_ext().execute_with(|| { - let max_rank = 9u16; - assert_ok!(Club::do_add_member_to_rank(69, max_rank / 2)); - assert_ok!(Club::do_add_member_to_rank(1337, max_rank)); - for i in 0..=max_rank { - if i <= max_rank / 2 { - assert_eq!(member_count(i), 2); - } else { - assert_eq!(member_count(i), 1); - } - } - assert_eq!(member_count(max_rank + 1), 0); - }) -} diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index c054d200452e8..3048dd804a5e2 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,25 +18,23 @@ //! Autogenerated weights for pallet_ranked_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! DATE: 2022-05-19, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /Users/gav/Core/substrate/target/release/substrate // benchmark // pallet +// --pallet +// pallet-ranked-collective +// --extrinsic=* // --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_ranked_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/ranked-collective/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=../../../frame/ranked-collective/src/weights.rs +// --template=../../../.maintain/frame-weight-template.hbs +// --header=../../../HEADER-APACHE2 +// --record-proof #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -63,73 +61,62 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - // Minimum execution time: 24_344 nanoseconds. - Weight::from_ref_time(24_856_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (11_000_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) - /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { - // Minimum execution time: 36_881 nanoseconds. 
- Weight::from_ref_time(39_284_238 as u64) - // Standard Error: 16_355 - .saturating_add(Weight::from_ref_time(11_385_424 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(r as u64))) + (16_855_000 as Weight) + // Standard Error: 27_000 + .saturating_add((8_107_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) - /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { - // Minimum execution time: 27_444 nanoseconds. - Weight::from_ref_time(28_576_394 as u64) - // Standard Error: 4_818 - .saturating_add(Weight::from_ref_time(519_056 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (11_936_000 as Weight) + // Standard Error: 3_000 + .saturating_add((9_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) - /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { - // Minimum execution time: 36_539 nanoseconds. 
- Weight::from_ref_time(39_339_893 as u64) - // Standard Error: 16_526 - .saturating_add(Weight::from_ref_time(807_457 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (17_582_000 as Weight) + // Standard Error: 14_000 + .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - // Minimum execution time: 50_548 nanoseconds. - Weight::from_ref_time(51_276_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (22_000_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - // Storage: RankedCollective VotingCleanup (r:1 w:0) - // Storage: RankedCollective Voting (r:0 w:2) - /// The range of component `n` is `[0, 100]`. + // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - // Minimum execution time: 16_222 nanoseconds. 
- Weight::from_ref_time(22_982_955 as u64) - // Standard Error: 3_863 - .saturating_add(Weight::from_ref_time(1_074_054 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + (6_188_000 as Weight) + // Standard Error: 1_000 + .saturating_add((867_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } } @@ -140,72 +127,61 @@ impl WeightInfo for () { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - // Minimum execution time: 24_344 nanoseconds. - Weight::from_ref_time(24_856_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (11_000_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) - /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { - // Minimum execution time: 36_881 nanoseconds. 
- Weight::from_ref_time(39_284_238 as u64) - // Standard Error: 16_355 - .saturating_add(Weight::from_ref_time(11_385_424 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(r as u64))) + (16_855_000 as Weight) + // Standard Error: 27_000 + .saturating_add((8_107_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) - /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { - // Minimum execution time: 27_444 nanoseconds. - Weight::from_ref_time(28_576_394 as u64) - // Standard Error: 4_818 - .saturating_add(Weight::from_ref_time(519_056 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (11_936_000 as Weight) + // Standard Error: 3_000 + .saturating_add((9_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) - /// The range of component `r` is `[0, 10]`. 
fn demote_member(r: u32, ) -> Weight { - // Minimum execution time: 36_539 nanoseconds. - Weight::from_ref_time(39_339_893 as u64) - // Standard Error: 16_526 - .saturating_add(Weight::from_ref_time(807_457 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (17_582_000 as Weight) + // Standard Error: 14_000 + .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - // Minimum execution time: 50_548 nanoseconds. - Weight::from_ref_time(51_276_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (22_000_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - // Storage: RankedCollective VotingCleanup (r:1 w:0) - // Storage: RankedCollective Voting (r:0 w:2) - /// The range of component `n` is `[0, 100]`. + // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - // Minimum execution time: 16_222 nanoseconds. 
- Weight::from_ref_time(22_982_955 as u64) - // Standard Error: 3_863 - .saturating_add(Weight::from_ref_time(1_074_054 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + (6_188_000 as Weight) + // Standard Error: 1_000 + .saturating_add((867_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } } diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index fb33b88d2dfab..396555b3e2758 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -36,7 +36,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/recovery/src/benchmarking.rs b/frame/recovery/src/benchmarking.rs index 870543d9bd290..5354de6d10b51 100644 --- a/frame/recovery/src/benchmarking.rs +++ b/frame/recovery/src/benchmarking.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::Bounded; const SEED: u32 = 0; const DEFAULT_DELAY: u32 = 0; -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -106,25 +106,22 @@ benchmarks! 
{ as_recovered { let caller: T::AccountId = whitelisted_caller(); let recovered_account: T::AccountId = account("recovered_account", 0, SEED); - let recovered_account_lookup = T::Lookup::unlookup(recovered_account.clone()); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::insert(&caller, &recovered_account); }: _( RawOrigin::Signed(caller), - recovered_account_lookup, + recovered_account, Box::new(call) ) set_recovered { let lost: T::AccountId = whitelisted_caller(); - let lost_lookup = T::Lookup::unlookup(lost.clone()); let rescuer: T::AccountId = whitelisted_caller(); - let rescuer_lookup = T::Lookup::unlookup(rescuer.clone()); }: _( RawOrigin::Root, - lost_lookup, - rescuer_lookup + lost.clone(), + rescuer.clone() ) verify { assert_last_event::( Event::AccountRecovered { @@ -156,12 +153,11 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let lost_account: T::AccountId = account("lost_account", 0, SEED); - let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); insert_recovery_account::(&caller, &lost_account); }: _( RawOrigin::Signed(caller.clone()), - lost_account_lookup + lost_account.clone() ) verify { assert_last_event::( Event::RecoveryInitiated { @@ -176,10 +172,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); - let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); - let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); - // Create friends let friends = add_caller_and_generate_friends::(caller.clone(), n); @@ -213,8 +206,8 @@ benchmarks! 
{ }: _( RawOrigin::Signed(caller.clone()), - lost_account_lookup, - rescuer_account_lookup + lost_account.clone(), + rescuer_account.clone() ) verify { assert_last_event::( Event::RecoveryVouched { @@ -230,7 +223,6 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); - let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -265,7 +257,7 @@ benchmarks! { >::insert(&lost_account, &caller, recovery_status); }: _( RawOrigin::Signed(caller.clone()), - lost_account_lookup + lost_account.clone() ) verify { assert_last_event::( Event::AccountRecovered { @@ -278,7 +270,6 @@ benchmarks! { close_recovery { let caller: T::AccountId = whitelisted_caller(); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); - let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); let n in 1 .. T::MaxFriends::get(); @@ -316,7 +307,7 @@ benchmarks! { >::insert(&caller, &rescuer_account, recovery_status); }: _( RawOrigin::Signed(caller.clone()), - rescuer_account_lookup + rescuer_account.clone() ) verify { assert_last_event::( Event::RecoveryClosed { @@ -365,7 +356,6 @@ benchmarks! { cancel_recovered { let caller: T::AccountId = whitelisted_caller(); let account: T::AccountId = account("account", 0, SEED); - let account_lookup = T::Lookup::unlookup(account.clone()); frame_system::Pallet::::inc_providers(&caller); @@ -374,7 +364,7 @@ benchmarks! 
{ Proxy::::insert(&caller, &account); }: _( RawOrigin::Signed(caller), - account_lookup + account ) impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 18d3d48dc024c..b839d25e32b47 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -156,12 +156,13 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, StaticLookup}; +use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion}; use sp_std::prelude::*; use frame_support::{ - dispatch::{GetDispatchInfo, PostDispatchInfo}, + dispatch::PostDispatchInfo, traits::{BalanceStatus, Currency, ReservableCurrency}, + weights::GetDispatchInfo, BoundedVec, RuntimeDebug, }; @@ -181,7 +182,6 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type FriendsOf = BoundedVec<::AccountId, ::MaxFriends>; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An active recovery process. #[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -225,14 +225,14 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The overarching call type. 
- type RuntimeCall: Parameter - + Dispatchable + type Call: Parameter + + Dispatchable + GetDispatchInfo + From>; @@ -382,11 +382,10 @@ pub mod pallet { )})] pub fn as_recovered( origin: OriginFor, - account: AccountIdLookupOf, - call: Box<::RuntimeCall>, + account: T::AccountId, + call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; - let account = T::Lookup::lookup(account)?; // Check `who` is allowed to make a call on behalf of `account` let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; ensure!(target == account, Error::::NotAllowed); @@ -406,12 +405,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_recovered())] pub fn set_recovered( origin: OriginFor, - lost: AccountIdLookupOf, - rescuer: AccountIdLookupOf, + lost: T::AccountId, + rescuer: T::AccountId, ) -> DispatchResult { ensure_root(origin)?; - let lost = T::Lookup::lookup(lost)?; - let rescuer = T::Lookup::lookup(rescuer)?; // Create the recovery storage item. >::insert(&rescuer, &lost); Self::deposit_event(Event::::AccountRecovered { @@ -452,7 +449,7 @@ pub mod pallet { ensure!(!friends.is_empty(), Error::::NotEnoughFriends); ensure!(threshold as usize <= friends.len(), Error::::NotEnoughFriends); let bounded_friends: FriendsOf = - friends.try_into().map_err(|_| Error::::MaxFriends)?; + friends.try_into().map_err(|()| Error::::MaxFriends)?; ensure!(Self::is_sorted_and_unique(&bounded_friends), Error::::NotSorted); // Total deposit is base fee + number of friends * factor fee let friend_deposit = T::FriendDepositFactor::get() @@ -489,12 +486,8 @@ pub mod pallet { /// - `account`: The lost account that you want to recover. This account needs to be /// recoverable (i.e. have a recovery configuration). 
#[pallet::weight(T::WeightInfo::initiate_recovery())] - pub fn initiate_recovery( - origin: OriginFor, - account: AccountIdLookupOf, - ) -> DispatchResult { + pub fn initiate_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; - let account = T::Lookup::lookup(account)?; // Check that the account is recoverable ensure!(>::contains_key(&account), Error::::NotRecoverable); // Check that the recovery process has not already been started @@ -535,12 +528,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::vouch_recovery(T::MaxFriends::get()))] pub fn vouch_recovery( origin: OriginFor, - lost: AccountIdLookupOf, - rescuer: AccountIdLookupOf, + lost: T::AccountId, + rescuer: T::AccountId, ) -> DispatchResult { let who = ensure_signed(origin)?; - let lost = T::Lookup::lookup(lost)?; - let rescuer = T::Lookup::lookup(rescuer)?; // Get the recovery configuration for the lost account. let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer. @@ -554,7 +545,7 @@ pub mod pallet { Err(pos) => active_recovery .friends .try_insert(pos, who.clone()) - .map_err(|_| Error::::MaxFriends)?, + .map_err(|()| Error::::MaxFriends)?, } // Update storage with the latest details >::insert(&lost, &rescuer, active_recovery); @@ -576,12 +567,8 @@ pub mod pallet { /// - `account`: The lost account that you want to claim has been successfully recovered by /// you. 
#[pallet::weight(T::WeightInfo::claim_recovery(T::MaxFriends::get()))] - pub fn claim_recovery( - origin: OriginFor, - account: AccountIdLookupOf, - ) -> DispatchResult { + pub fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; - let account = T::Lookup::lookup(account)?; // Get the recovery configuration for the lost account let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; @@ -623,12 +610,8 @@ pub mod pallet { /// Parameters: /// - `rescuer`: The account trying to rescue this recoverable account. #[pallet::weight(T::WeightInfo::close_recovery(T::MaxFriends::get()))] - pub fn close_recovery( - origin: OriginFor, - rescuer: AccountIdLookupOf, - ) -> DispatchResult { + pub fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; - let rescuer = T::Lookup::lookup(rescuer)?; // Take the active recovery process started by the rescuer for this account. let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; @@ -682,12 +665,8 @@ pub mod pallet { /// Parameters: /// - `account`: The recovered account you are able to call on-behalf-of. #[pallet::weight(T::WeightInfo::cancel_recovered())] - pub fn cancel_recovered( - origin: OriginFor, - account: AccountIdLookupOf, - ) -> DispatchResult { + pub fn cancel_recovered(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; - let account = T::Lookup::lookup(account)?; // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); Proxy::::remove(&who); diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 2a29390fdd20f..44fc4d72a4a5f 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -47,7 +47,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -55,8 +55,8 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -64,7 +64,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -87,7 +87,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -102,9 +102,9 @@ parameter_types! 
{ } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); - type RuntimeCall = RuntimeCall; + type Call = Call; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index b037a8110147d..a900a5b6bfa2a 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -20,8 +20,8 @@ use super::*; use frame_support::{assert_noop, assert_ok, bounded_vec, traits::Currency}; use mock::{ - new_test_ext, run_to_block, Balances, BalancesCall, MaxFriends, Recovery, RecoveryCall, - RuntimeCall, RuntimeOrigin, Test, + new_test_ext, run_to_block, Balances, BalancesCall, Call, MaxFriends, Origin, Recovery, + RecoveryCall, Test, }; use sp_runtime::traits::BadOrigin; @@ -41,12 +41,12 @@ fn basic_setup_works() { fn set_recovered_works() { new_test_ext().execute_with(|| { // Not accessible by a normal user - assert_noop!(Recovery::set_recovered(RuntimeOrigin::signed(1), 5, 1), BadOrigin); + assert_noop!(Recovery::set_recovered(Origin::signed(1), 5, 1), BadOrigin); // Root can set a recovered account though - assert_ok!(Recovery::set_recovered(RuntimeOrigin::root(), 5, 1)); + assert_ok!(Recovery::set_recovered(Origin::root(), 5, 1)); // Account 1 should now be able to make a call through account 5 - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 100 })); - assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 100 })); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 has successfully drained the funds from account 5 assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(5), 0); @@ -60,46 +60,38 @@ fn recovery_life_cycle_works() { let threshold = 3; let delay_period = 10; // Account 5 sets up a recovery 
configuration on their account - assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), - friends, - threshold, - delay_period - )); + assert_ok!(Recovery::create_recovery(Origin::signed(5), friends, threshold, delay_period)); // Some time has passed, and the user lost their keys! run_to_block(10); // Using account 1, the user begins the recovery process to recover the lost account - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Off chain, the user contacts their friends and asks them to vouch for the recovery // attempt - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); // We met the threshold, lets try to recover the account...? - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::DelayPeriod - ); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); // We need to wait at least the delay_period number of blocks before we can recover run_to_block(20); - assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); // Account 1 can use account 5 to close the active recovery process, claiming the deposited // funds used to initiate the recovery process into account 5. 
- let call = Box::new(RuntimeCall::Recovery(RecoveryCall::close_recovery { rescuer: 1 })); - assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); + let call = Box::new(Call::Recovery(RecoveryCall::close_recovery { rescuer: 1 })); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 can then use account 5 to remove the recovery configuration, claiming the // deposited funds used to create the recovery configuration into account 5. - let call = Box::new(RuntimeCall::Recovery(RecoveryCall::remove_recovery {})); - assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); + let call = Box::new(Call::Recovery(RecoveryCall::remove_recovery {})); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 should now be able to make a call through account 5 to get all of their funds assert_eq!(Balances::free_balance(5), 110); - let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 110 })); - assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 110 })); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // All funds have been fully recovered! assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(5), 0); // Remove the proxy link. 
- assert_ok!(Recovery::cancel_recovered(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::cancel_recovered(Origin::signed(1), 5)); // All storage items are removed from the module assert!(!>::contains_key(&5, &1)); @@ -115,52 +107,38 @@ fn malicious_recovery_fails() { let threshold = 3; let delay_period = 10; // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), - friends, - threshold, - delay_period - )); + assert_ok!(Recovery::create_recovery(Origin::signed(5), friends, threshold, delay_period)); // Some time has passed, and account 1 wants to try and attack this account! run_to_block(10); // Using account 1, the malicious user begins the recovery process on account 5 - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Off chain, the user **tricks** their friends and asks them to vouch for the recovery - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); // shame on you - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // shame on you - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); // shame on you // We met the threshold, lets try to recover the account...? - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::DelayPeriod - ); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); // Account 1 needs to wait... run_to_block(19); // One more block to wait! 
- assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::DelayPeriod - ); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); // Account 5 checks their account every `delay_period` and notices the malicious attack! // Account 5 can close the recovery process before account 1 can claim it - assert_ok!(Recovery::close_recovery(RuntimeOrigin::signed(5), 1)); + assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); // By doing so, account 5 has now claimed the deposit originally reserved by account 1 assert_eq!(Balances::total_balance(&1), 90); // Thanks for the free money! assert_eq!(Balances::total_balance(&5), 110); // The recovery process has been closed, so account 1 can't make the claim run_to_block(20); - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::NotStarted - ); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); // Account 5 can remove their recovery config and pick some better friends - assert_ok!(Recovery::remove_recovery(RuntimeOrigin::signed(5))); + assert_ok!(Recovery::remove_recovery(Origin::signed(5))); assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), vec![22, 33, 44], threshold, delay_period @@ -173,23 +151,23 @@ fn create_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // No friends assert_noop!( - Recovery::create_recovery(RuntimeOrigin::signed(5), vec![], 1, 0), + Recovery::create_recovery(Origin::signed(5), vec![], 1, 0), Error::::NotEnoughFriends ); // Zero threshold assert_noop!( - Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2], 0, 0), + Recovery::create_recovery(Origin::signed(5), vec![2], 0, 0), Error::::ZeroThreshold ); // Threshold greater than friends length assert_noop!( - Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 3, 4], 4, 0), + Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 4, 0), 
Error::::NotEnoughFriends ); // Too many friends assert_noop!( Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), vec![1; (MaxFriends::get() + 1) as usize], 1, 0 @@ -198,18 +176,18 @@ fn create_recovery_handles_basic_errors() { ); // Unsorted friends assert_noop!( - Recovery::create_recovery(RuntimeOrigin::signed(5), vec![3, 2, 4], 3, 0), + Recovery::create_recovery(Origin::signed(5), vec![3, 2, 4], 3, 0), Error::::NotSorted ); // Duplicate friends assert_noop!( - Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 2, 4], 3, 0), + Recovery::create_recovery(Origin::signed(5), vec![2, 2, 4], 3, 0), Error::::NotSorted ); // Already configured - assert_ok!(Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 3, 4], 3, 10)); + assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10)); assert_noop!( - Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 3, 4], 3, 10), + Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10), Error::::AlreadyRecoverable ); }); @@ -223,7 +201,7 @@ fn create_recovery_works() { let delay_period = 10; // Account 5 sets up a recovery configuration on their account assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period @@ -247,7 +225,7 @@ fn initiate_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // No recovery process set up for the account assert_noop!( - Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5), + Recovery::initiate_recovery(Origin::signed(1), 5), Error::::NotRecoverable ); // Create a recovery process for next test @@ -255,15 +233,15 @@ fn initiate_recovery_handles_basic_errors() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); // Same user cannot recover same account twice - 
assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); assert_noop!( - Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5), + Recovery::initiate_recovery(Origin::signed(1), 5), Error::::AlreadyStarted ); // No double deposit @@ -279,13 +257,13 @@ fn initiate_recovery_works() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); // Recovery can be initiated - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Deposit is reserved assert_eq!(Balances::reserved_balance(1), 10); // Recovery status object is created correctly @@ -293,7 +271,7 @@ fn initiate_recovery_works() { ActiveRecovery { created: 0, deposit: 10, friends: Default::default() }; assert_eq!(>::get(&5, &1), Some(recovery_status)); // Multiple users can attempt to recover the same account - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(2), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); }); } @@ -302,7 +280,7 @@ fn vouch_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot vouch for non-recoverable account assert_noop!( - Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1), + Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotRecoverable ); // Create a recovery process for next tests @@ -310,27 +288,21 @@ fn vouch_recovery_handles_basic_errors() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); // Cannot vouch a recovery process that has not started - assert_noop!( - Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1), - Error::::NotStarted - ); + assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), 
Error::::NotStarted); // Initiate a recovery process - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Cannot vouch if you are not a friend - assert_noop!( - Recovery::vouch_recovery(RuntimeOrigin::signed(22), 5, 1), - Error::::NotFriend - ); + assert_noop!(Recovery::vouch_recovery(Origin::signed(22), 5, 1), Error::::NotFriend); // Cannot vouch twice - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); assert_noop!( - Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1), + Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::AlreadyVouched ); }); @@ -344,17 +316,17 @@ fn vouch_recovery_works() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Vouching works - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); // Handles out of order vouches - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // Final recovery status object is updated correctly let recovery_status = ActiveRecovery { created: 0, deposit: 10, friends: bounded_vec![2, 3, 4] }; @@ -366,40 +338,28 @@ fn vouch_recovery_works() { fn claim_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot claim a non-recoverable account - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::NotRecoverable - ); + 
assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotRecoverable); // Create a recovery process for the test let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); // Cannot claim an account which has not started the recovery process - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::NotStarted - ); - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Cannot claim an account which has not passed the delay period - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::DelayPeriod - ); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); run_to_block(11); // Cannot claim an account which has not passed the threshold number of votes - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // Only 2/3 is not good enough - assert_noop!( - Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), - Error::::Threshold - ); + assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::Threshold); }); } @@ -411,32 +371,32 @@ fn claim_recovery_works() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 
5, 1)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); run_to_block(11); // Account can be recovered. - assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); // Recovered storage item is correctly created assert_eq!(>::get(&1), Some(5)); // Account could be re-recovered in the case that the recoverer account also gets lost. - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(4), 5)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 4)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 4)); - assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 4)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(4), 5)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 4)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 4)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 4)); run_to_block(21); // Account is re-recovered. 
- assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(4), 5)); + assert_ok!(Recovery::claim_recovery(Origin::signed(4), 5)); // Recovered storage item is correctly updated assert_eq!(>::get(&4), Some(5)); }); @@ -446,10 +406,7 @@ fn claim_recovery_works() { fn close_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot close a non-active recovery - assert_noop!( - Recovery::close_recovery(RuntimeOrigin::signed(5), 1), - Error::::NotStarted - ); + assert_noop!(Recovery::close_recovery(Origin::signed(5), 1), Error::::NotStarted); }); } @@ -457,35 +414,26 @@ fn close_recovery_handles_basic_errors() { fn remove_recovery_works() { new_test_ext().execute_with(|| { // Cannot remove an unrecoverable account - assert_noop!( - Recovery::remove_recovery(RuntimeOrigin::signed(5)), - Error::::NotRecoverable - ); + assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::NotRecoverable); // Create and initiate a recovery process for the test let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(5), + Origin::signed(5), friends.clone(), threshold, delay_period )); - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); - assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(2), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); // Cannot remove a recovery when there are active recoveries. - assert_noop!( - Recovery::remove_recovery(RuntimeOrigin::signed(5)), - Error::::StillActive - ); - assert_ok!(Recovery::close_recovery(RuntimeOrigin::signed(5), 1)); + assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::StillActive); + assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); // Still need to remove one more! 
- assert_noop!( - Recovery::remove_recovery(RuntimeOrigin::signed(5)), - Error::::StillActive - ); - assert_ok!(Recovery::close_recovery(RuntimeOrigin::signed(5), 2)); + assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::StillActive); + assert_ok!(Recovery::close_recovery(Origin::signed(5), 2)); // Finally removed - assert_ok!(Recovery::remove_recovery(RuntimeOrigin::signed(5))); + assert_ok!(Recovery::remove_recovery(Origin::signed(5))); }); } diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 39a8d09a38261..0887180a533fc 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_recovery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/recovery/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/recovery/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -63,85 +60,71 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - // Minimum execution time: 10_672 nanoseconds. - Weight::from_ref_time(10_946_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (6_579_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - // Minimum execution time: 17_092 nanoseconds. - Weight::from_ref_time(17_660_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (13_402_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { - // Minimum execution time: 32_800 nanoseconds. - Weight::from_ref_time(33_769_078 as u64) - // Standard Error: 4_075 - .saturating_add(Weight::from_ref_time(252_382 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (28_217_000 as Weight) + // Standard Error: 13_000 + .saturating_add((172_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - // Minimum execution time: 39_224 nanoseconds. 
- Weight::from_ref_time(39_663_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (34_082_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { - // Minimum execution time: 27_158 nanoseconds. - Weight::from_ref_time(28_130_506 as u64) - // Standard Error: 4_523 - .saturating_add(Weight::from_ref_time(321_436 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (22_038_000 as Weight) + // Standard Error: 19_000 + .saturating_add((307_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { - // Minimum execution time: 36_269 nanoseconds. - Weight::from_ref_time(36_966_173 as u64) - // Standard Error: 5_016 - .saturating_add(Weight::from_ref_time(223_069 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (28_621_000 as Weight) + // Standard Error: 13_000 + .saturating_add((353_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { - // Minimum execution time: 40_213 nanoseconds. 
- Weight::from_ref_time(41_140_968 as u64) - // Standard Error: 3_822 - .saturating_add(Weight::from_ref_time(163_217 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (33_287_000 as Weight) + // Standard Error: 19_000 + .saturating_add((264_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { - // Minimum execution time: 38_740 nanoseconds. - Weight::from_ref_time(39_710_400 as u64) - // Standard Error: 5_554 - .saturating_add(Weight::from_ref_time(224_200 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (31_964_000 as Weight) + // Standard Error: 13_000 + .saturating_add((222_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - // Minimum execution time: 20_316 nanoseconds. - Weight::from_ref_time(20_912_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (12_702_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } @@ -149,84 +132,70 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - // Minimum execution time: 10_672 nanoseconds. 
- Weight::from_ref_time(10_946_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (6_579_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - // Minimum execution time: 17_092 nanoseconds. - Weight::from_ref_time(17_660_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (13_402_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { - // Minimum execution time: 32_800 nanoseconds. - Weight::from_ref_time(33_769_078 as u64) - // Standard Error: 4_075 - .saturating_add(Weight::from_ref_time(252_382 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (28_217_000 as Weight) + // Standard Error: 13_000 + .saturating_add((172_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - // Minimum execution time: 39_224 nanoseconds. - Weight::from_ref_time(39_663_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (34_082_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { - // Minimum execution time: 27_158 nanoseconds. 
- Weight::from_ref_time(28_130_506 as u64) - // Standard Error: 4_523 - .saturating_add(Weight::from_ref_time(321_436 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (22_038_000 as Weight) + // Standard Error: 19_000 + .saturating_add((307_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { - // Minimum execution time: 36_269 nanoseconds. - Weight::from_ref_time(36_966_173 as u64) - // Standard Error: 5_016 - .saturating_add(Weight::from_ref_time(223_069 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (28_621_000 as Weight) + // Standard Error: 13_000 + .saturating_add((353_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { - // Minimum execution time: 40_213 nanoseconds. 
- Weight::from_ref_time(41_140_968 as u64) - // Standard Error: 3_822 - .saturating_add(Weight::from_ref_time(163_217 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (33_287_000 as Weight) + // Standard Error: 19_000 + .saturating_add((264_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) - /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { - // Minimum execution time: 38_740 nanoseconds. - Weight::from_ref_time(39_710_400 as u64) - // Standard Error: 5_554 - .saturating_add(Weight::from_ref_time(224_200 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (31_964_000 as Weight) + // Standard Error: 13_000 + .saturating_add((222_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - // Minimum execution time: 20_316 nanoseconds. 
- Weight::from_ref_time(20_912_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (12_702_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/referenda/Cargo.toml b/frame/referenda/Cargo.toml index 4e68d7528ad8a..508f5a5ef8688 100644 --- a/frame/referenda/Cargo.toml +++ b/frame/referenda/Cargo.toml @@ -38,7 +38,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "sp-runtime/std", "sp-arithmetic/std", diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index bc6fb31bf1127..9abd3768f780c 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -20,292 +20,266 @@ use super::*; use crate::Pallet as Referenda; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; +use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ assert_ok, - dispatch::UnfilteredDispatchable, - traits::{Bounded, Currency, EnsureOrigin}, + traits::{Currency, EnsureOrigin}, }; use frame_system::RawOrigin; -use sp_runtime::traits::Bounded as ArithBounded; +use sp_runtime::traits::{Bounded, Hash}; const SEED: u32 = 0; #[allow(dead_code)] -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { +fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + 
T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn dummy_call, I: 'static>() -> Bounded<>::RuntimeCall> { - let inner = frame_system::Call::remark { remark: vec![] }; - let call = >::RuntimeCall::from(inner); - T::Preimages::bound(call).unwrap() -} - -fn create_referendum, I: 'static>() -> (T::RuntimeOrigin, ReferendumIndex) { - let origin: T::RuntimeOrigin = T::SubmitOrigin::successful_origin(); - if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - whitelist_account!(caller); - } - - let proposal_origin = Box::new(RawOrigin::Root.into()); - let proposal = dummy_call::(); - let enactment_moment = DispatchTime::After(0u32.into()); - let call = crate::Call::::submit { proposal_origin, proposal, enactment_moment }; - assert_ok!(call.dispatch_bypass_filter(origin.clone())); - let index = ReferendumCount::::get() - 1; - (origin, index) +fn create_referendum() -> (T::AccountId, ReferendumIndex) { + let caller = funded_account::("caller", 0); + whitelist_account!(caller); + assert_ok!(Referenda::::submit( + RawOrigin::Signed(caller.clone()).into(), + Box::new(RawOrigin::Root.into()), + T::Hashing::hash_of(&0), + DispatchTime::After(0u32.into()) + )); + let index = ReferendumCount::::get() - 1; + (caller, index) } -fn place_deposit, I: 'static>(index: ReferendumIndex) { - let caller = funded_account::("caller", 0); +fn place_deposit(index: ReferendumIndex) { + let caller = funded_account::("caller", 0); whitelist_account!(caller); - assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); + assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); } -fn nudge, I: 'static>(index: ReferendumIndex) { - assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); +fn nudge(index: ReferendumIndex) { + assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); } -fn 
fill_queue, I: 'static>( +fn fill_queue( index: ReferendumIndex, spaces: u32, pass_after: u32, ) -> Vec { // First, create enough other referendums to fill the track. let mut others = vec![]; - for _ in 0..info::(index).max_deciding { - let (_origin, index) = create_referendum::(); - place_deposit::(index); + for _ in 0..info::(index).max_deciding { + let (_caller, index) = create_referendum::(); + place_deposit::(index); others.push(index); } // We will also need enough referenda which are queued and passing, we want `MaxQueued - 1` // in order to force the maximum amount of work to insert ours into the queue. for _ in spaces..T::MaxQueued::get() { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - make_passing_after::(index, Perbill::from_percent(pass_after)); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing_after::(index, Perbill::from_percent(pass_after)); others.push(index); } // Skip to when they can start being decided. - skip_prepare_period::(index); + skip_prepare_period::(index); // Manually nudge the other referenda first to ensure that they begin. - others.iter().for_each(|&i| nudge::(i)); + others.iter().for_each(|&i| nudge::(i)); others } -fn info, I: 'static>(index: ReferendumIndex) -> &'static TrackInfoOf { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn info(index: ReferendumIndex) -> &'static TrackInfoOf { + let status = Referenda::::ensure_ongoing(index).unwrap(); T::Tracks::info(status.track).expect("Id value returned from T::Tracks") } -fn make_passing_after, I: 'static>(index: ReferendumIndex, period_portion: Perbill) { - // We add an extra 1 percent to handle any perbill rounding errors which may cause - // a proposal to not actually pass. 
- let support = info::(index) - .min_support - .threshold(period_portion) - .saturating_add(Perbill::from_percent(1)); - let approval = info::(index) - .min_approval - .threshold(period_portion) - .saturating_add(Perbill::from_percent(1)); - Referenda::::access_poll(index, |status| { +fn make_passing_after(index: ReferendumIndex, period_portion: Perbill) { + let support = info::(index).min_support.threshold(period_portion); + let approval = info::(index).min_approval.threshold(period_portion); + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { - T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::from_requirements(support, approval, class); } }); } -fn make_passing, I: 'static>(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_passing(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { - T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::unanimity(class); } }); } -fn make_failing, I: 'static>(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_failing(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { - T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::rejection(class); } }); } -fn skip_prepare_period, I: 'static>(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let prepare_period_over = status.submitted + info::(index).prepare_period; +fn skip_prepare_period(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let prepare_period_over = status.submitted + info::(index).prepare_period; frame_system::Pallet::::set_block_number(prepare_period_over); } -fn skip_decision_period, I: 'static>(index: ReferendumIndex) { - let status = 
Referenda::::ensure_ongoing(index).unwrap(); - let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; +fn skip_decision_period(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; frame_system::Pallet::::set_block_number(decision_period_over); } -fn skip_confirm_period, I: 'static>(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_confirm_period(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let confirm_period_over = status.deciding.unwrap().confirming.unwrap(); frame_system::Pallet::::set_block_number(confirm_period_over); } -fn skip_timeout_period, I: 'static>(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_timeout_period(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let timeout_period_over = status.submitted + T::UndecidingTimeout::get(); frame_system::Pallet::::set_block_number(timeout_period_over); } -fn alarm_time, I: 'static>(index: ReferendumIndex) -> T::BlockNumber { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn alarm_time(index: ReferendumIndex) -> T::BlockNumber { + let status = Referenda::::ensure_ongoing(index).unwrap(); status.alarm.unwrap().0 } -fn is_confirming, I: 'static>(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_confirming(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: Some(_), .. }), .. 
} ) } -fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_not_confirming(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: None, .. }), .. } ) } -benchmarks_instance_pallet! { +benchmarks! { submit { - let origin: T::RuntimeOrigin = T::SubmitOrigin::successful_origin(); - if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - whitelist_account!(caller); - } - }: _( - origin, + let caller = funded_account::("caller", 0); + whitelist_account!(caller); + }: _( + RawOrigin::Signed(caller), Box::new(RawOrigin::Root.into()), - dummy_call::(), + T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) ) verify { - let index = ReferendumCount::::get().checked_sub(1).unwrap(); - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Ongoing(_))); + let index = ReferendumCount::::get().checked_sub(1).unwrap(); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Ongoing(_))); } place_decision_deposit_preparing { - let (origin, index) = create_referendum::(); - }: place_decision_deposit(origin, index) + let (caller, index) = create_referendum::(); + }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); + assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); } place_decision_deposit_queued { - let (origin, index) = create_referendum::(); - fill_queue::(index, 1, 90); - }: place_decision_deposit(origin, index) + let (caller, index) = create_referendum::(); + fill_queue::(index, 1, 90); + }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - let track = Referenda::::ensure_ongoing(index).unwrap().track; - 
assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).contains(&(index, 0u32.into()))); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); } place_decision_deposit_not_queued { - let (origin, index) = create_referendum::(); - fill_queue::(index, 0, 90); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); - }: place_decision_deposit(origin, index) + let (caller, index) = create_referendum::(); + fill_queue::(index, 0, 90); + }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } place_decision_deposit_passing { - let (origin, index) = create_referendum::(); - skip_prepare_period::(index); - make_passing::(index); - }: place_decision_deposit(origin, index) + let (caller, index) = create_referendum::(); + skip_prepare_period::(index); + make_passing::(index); + }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } place_decision_deposit_failing { - let (origin, index) = create_referendum::(); - skip_prepare_period::(index); - }: place_decision_deposit(origin, index) + let (caller, index) = create_referendum::(); + skip_prepare_period::(index); + }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - 
assert!(is_not_confirming::(index)); + assert!(is_not_confirming::(index)); } refund_decision_deposit { - let (origin, index) = create_referendum::(); - place_deposit::(index); - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - }: _(origin, index) + let (caller, index) = create_referendum::(); + place_deposit::(index); + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); + }: _(RawOrigin::Signed(caller), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); } cancel { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - }: _(T::CancelOrigin::successful_origin(), index) + let (_caller, index) = create_referendum::(); + place_deposit::(index); + }: _(T::CancelOrigin::successful_origin(), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); } kill { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - }: _(T::KillOrigin::successful_origin(), index) + let (_caller, index) = create_referendum::(); + place_deposit::(index); + }: _(T::KillOrigin::successful_origin(), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); } one_fewer_deciding_queue_empty { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - assert_eq!(DecidingCount::::get(&track), 1); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + 
skip_prepare_period::(index); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); + assert_eq!(DecidingCount::::get(&track), 1); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), 0); + assert_eq!(DecidingCount::::get(&track), 0); } one_fewer_deciding_failing { - let (_origin, index) = create_referendum::(); + let (_caller, index) = create_referendum::(); // No spaces free in the queue. - let queued = fill_queue::(index, 0, 90); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - let deciding_count = DecidingCount::::get(&track); + let queued = fill_queue::(index, 0, 90); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + let deciding_count = DecidingCount::::get(&track); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), deciding_count); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert_eq!(DecidingCount::::get(&track), deciding_count); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) .unwrap() .deciding .map_or(true, |d| d.confirming.is_none()) @@ -313,63 +287,63 @@ benchmarks_instance_pallet! { } one_fewer_deciding_passing { - let (_origin, index) = create_referendum::(); + let (_caller, index) = create_referendum::(); // No spaces free in the queue. 
- let queued = fill_queue::(index, 0, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - let deciding_count = DecidingCount::::get(&track); + let queued = fill_queue::(index, 0, 0); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + let deciding_count = DecidingCount::::get(&track); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), deciding_count); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).filter(|i| Referenda::::ensure_ongoing(*i) + assert_eq!(DecidingCount::::get(&track), deciding_count); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) .unwrap() .deciding - .map_or(false, |d| d.confirming.is_some()) - ).count() == 1); + .map_or(true, |d| d.confirming.is_some()) + )); } nudge_referendum_requeued_insertion { // First create our referendum and place the deposit. It will be failing. - let (_origin, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 0, 90); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 0, 90); // Now nudge ours, with the track now full and the queue full of referenda with votes, // ours will not be in the queue. 
- nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); // Now alter the voting, so that ours goes into pole-position and shifts others down. - make_passing::(index); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let t = TrackQueue::::get(&track); + let t = TrackQueue::::get(&track); assert_eq!(t.len() as u32, T::MaxQueued::get()); assert_eq!(t[t.len() - 1].0, index); } nudge_referendum_requeued_slide { // First create our referendum and place the deposit. It will be failing. - let (_origin, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 1, 90); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 1, 90); // Now nudge ours, with the track now full, ours will be queued, but with no votes, it // will have the worst position. - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); // Now alter the voting, so that ours leap-frogs all into the best position. - make_passing::(index); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let t = TrackQueue::::get(&track); + let t = TrackQueue::::get(&track); assert_eq!(t.len() as u32, T::MaxQueued::get()); assert_eq!(t[t.len() - 1].0, index); } @@ -380,159 +354,159 @@ benchmarks_instance_pallet! { // insertion at the beginning. 
// First create our referendum and place the deposit. It will be failing. - let (_origin, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 1, 0); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 1, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); // Then nudge ours, with the track now full, ours will be queued. }: nudge_referendum(RawOrigin::Root, index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); } nudge_referendum_not_queued { // First create our referendum and place the deposit. It will be failing. - let (_origin, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 0, 0); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 0, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); // Then nudge ours, with the track now full, ours will be queued. 
}: nudge_referendum(RawOrigin::Root, index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } nudge_referendum_no_deposit { - let (_origin, index) = create_referendum::(); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let status = Referenda::::ensure_ongoing(index).unwrap(); + let status = Referenda::::ensure_ongoing(index).unwrap(); assert_matches!(status, ReferendumStatus { deciding: None, .. }); } nudge_referendum_preparing { - let (_origin, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let status = Referenda::::ensure_ongoing(index).unwrap(); + let status = Referenda::::ensure_ongoing(index).unwrap(); assert_matches!(status, ReferendumStatus { deciding: None, .. 
}); } nudge_referendum_timed_out { - let (_origin, index) = create_referendum::(); - skip_timeout_period::(index); + let (_caller, index) = create_referendum::(); + skip_timeout_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::TimedOut(..)); } nudge_referendum_begin_deciding_failing { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_not_confirming::(index)); + assert!(is_not_confirming::(index)); } nudge_referendum_begin_deciding_passing { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - make_passing::(index); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing::(index); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_begin_confirming { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(!is_confirming::(index)); - make_passing::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(!is_confirming::(index)); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_end_confirming { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_passing::(index); - nudge::(index); - assert!(is_confirming::(index)); - 
make_failing::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_passing::(index); + nudge::(index); + assert!(is_confirming::(index)); + make_failing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(!is_confirming::(index)); + assert!(!is_confirming::(index)); } nudge_referendum_continue_not_confirming { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(!is_confirming::(index)); - let old_alarm = alarm_time::(index); - make_passing_after::(index, Perbill::from_percent(50)); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(!is_confirming::(index)); + let old_alarm = alarm_time::(index); + make_passing_after::(index, Perbill::from_percent(50)); }: nudge_referendum(RawOrigin::Root, index) verify { - assert_ne!(old_alarm, alarm_time::(index)); - assert!(!is_confirming::(index)); + assert_ne!(old_alarm, alarm_time::(index)); + assert!(!is_confirming::(index)); } nudge_referendum_continue_confirming { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - make_passing::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(is_confirming::(index)); - let old_alarm = alarm_time::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(is_confirming::(index)); + let old_alarm = alarm_time::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_approved { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_passing::(index); - nudge::(index); - skip_confirm_period::(index); + let (_caller, index) = 
create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_passing::(index); + nudge::(index); + skip_confirm_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::Approved(..)); } nudge_referendum_rejected { - let (_origin, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_failing::(index); - nudge::(index); - skip_decision_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_failing::(index); + nudge::(index); + skip_decision_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::Rejected(..)); } diff --git a/frame/referenda/src/branch.rs b/frame/referenda/src/branch.rs index d3744979fc547..f381f5fe5b709 100644 --- a/frame/referenda/src/branch.rs +++ b/frame/referenda/src/branch.rs @@ -19,7 +19,6 @@ use super::Config; use crate::weights::WeightInfo; -use frame_support::weights::Weight; /// Branches within the `begin_deciding` function. pub enum BeginDecidingBranch { @@ -83,8 +82,7 @@ impl ServiceBranch { /// Return the maximum possible weight of the `nudge` function. 
pub fn max_weight_of_nudge, I: 'static>() -> frame_support::weights::Weight { - Weight::zero() - .max(T::WeightInfo::nudge_referendum_no_deposit()) + 0.max(T::WeightInfo::nudge_referendum_no_deposit()) .max(T::WeightInfo::nudge_referendum_preparing()) .max(T::WeightInfo::nudge_referendum_queued()) .max(T::WeightInfo::nudge_referendum_not_queued()) @@ -107,7 +105,7 @@ impl ServiceBranch { self, ) -> Option { use ServiceBranch::*; - let ref_time_weight = match self { + Some(match self { Preparing => T::WeightInfo::place_decision_deposit_preparing(), Queued => T::WeightInfo::place_decision_deposit_queued(), NotQueued => T::WeightInfo::place_decision_deposit_not_queued(), @@ -124,15 +122,12 @@ impl ServiceBranch { TimedOut | Fail | NoDeposit => return None, - }; - - Some(ref_time_weight) + }) } /// Return the maximum possible weight of the `place_decision_deposit` function. pub fn max_weight_of_deposit, I: 'static>() -> frame_support::weights::Weight { - Weight::zero() - .max(T::WeightInfo::place_decision_deposit_preparing()) + 0.max(T::WeightInfo::place_decision_deposit_preparing()) .max(T::WeightInfo::place_decision_deposit_queued()) .max(T::WeightInfo::place_decision_deposit_not_queued()) .max(T::WeightInfo::place_decision_deposit_passing()) @@ -172,8 +167,7 @@ impl OneFewerDecidingBranch { /// Return the maximum possible weight of the `one_fewer_deciding` function. 
pub fn max_weight, I: 'static>() -> frame_support::weights::Weight { - Weight::zero() - .max(T::WeightInfo::one_fewer_deciding_queue_empty()) + 0.max(T::WeightInfo::one_fewer_deciding_queue_empty()) .max(T::WeightInfo::one_fewer_deciding_passing()) .max(T::WeightInfo::one_fewer_deciding_failing()) } diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index d060c3db3fa70..15c5562d64c84 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -69,11 +69,11 @@ use frame_support::{ ensure, traits::{ schedule::{ - v3::{Anon as ScheduleAnon, Named as ScheduleNamed}, - DispatchTime, + v2::{Anon as ScheduleAnon, Named as ScheduleNamed}, + DispatchTime, MaybeHashed, }, - Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, - ReservableCurrency, StorePreimage, VoteTally, + Currency, Get, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, + ReservableCurrency, VoteTally, }, BoundedVec, }; @@ -92,10 +92,10 @@ use self::branch::{BeginDecidingBranch, OneFewerDecidingBranch, ServiceBranch}; pub use self::{ pallet::*, types::{ - BalanceOf, BoundedCallOf, CallOf, Curve, DecidingStatus, DecidingStatusOf, Deposit, - InsertSorted, NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex, ReferendumInfo, - ReferendumInfoOf, ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf, TallyOf, - TrackIdOf, TrackInfo, TrackInfoOf, TracksInfo, VotesOf, + BalanceOf, CallOf, Curve, DecidingStatus, DecidingStatusOf, Deposit, InsertSorted, + NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex, ReferendumInfo, ReferendumInfoOf, + ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf, TallyOf, TrackIdOf, TrackInfo, + TrackInfoOf, TracksInfo, VotesOf, }, weights::WeightInfo, }; @@ -108,30 +108,6 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -pub use frame_support::traits::Get; -pub use sp_std::vec::Vec; - -#[macro_export] -macro_rules! 
impl_tracksinfo_get { - ($tracksinfo:ty, $balance:ty, $blocknumber:ty) => { - impl - $crate::Get< - $crate::Vec<( - <$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::Id, - $crate::TrackInfo<$balance, $blocknumber>, - )>, - > for $tracksinfo - { - fn get() -> $crate::Vec<( - <$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::Id, - $crate::TrackInfo<$balance, $blocknumber>, - )> { - <$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::tracks().to_vec() - } - } - }; -} - const ASSEMBLY_ID: LockIdentifier = *b"assembly"; #[frame_support::pallet] @@ -147,27 +123,31 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { // System level stuff. - type RuntimeCall: Parameter - + Dispatchable - + From> - + IsType<::RuntimeCall> - + From>; - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Call: Parameter + Dispatchable + From>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. - type Scheduler: ScheduleAnon, PalletsOriginOf> - + ScheduleNamed, PalletsOriginOf>; + type Scheduler: ScheduleAnon< + Self::BlockNumber, + CallOf, + PalletsOriginOf, + Hash = Self::Hash, + > + ScheduleNamed< + Self::BlockNumber, + CallOf, + PalletsOriginOf, + Hash = Self::Hash, + >; /// Currency type for this pallet. type Currency: ReservableCurrency; // Origins and unbalances. /// Origin from which proposals may be submitted. - type SubmitOrigin: EnsureOrigin; + type SubmitOrigin: EnsureOrigin; /// Origin from which any vote may be cancelled. - type CancelOrigin: EnsureOrigin; + type CancelOrigin: EnsureOrigin; /// Origin from which any vote may be killed. - type KillOrigin: EnsureOrigin; + type KillOrigin: EnsureOrigin; /// Handler for the unbalanced reduction when slashing a preimage deposit. type Slash: OnUnbalanced>; /// The counting type for votes. Usually just balance. @@ -203,20 +183,11 @@ pub mod pallet { // The other stuff. 
/// Information concerning the different referendum tracks. - #[pallet::constant] - type Tracks: Get< - Vec<( - , Self::BlockNumber>>::Id, - TrackInfo, Self::BlockNumber>, - )>, - > + TracksInfo< - BalanceOf, - Self::BlockNumber, - RuntimeOrigin = ::PalletsOrigin, - >; - - /// The preimage provider. - type Preimages: QueryPreimage + StorePreimage; + type Tracks: TracksInfo< + BalanceOf, + Self::BlockNumber, + Origin = ::PalletsOrigin, + >; } /// The next free referendum index, aka the number of referenda started so far. @@ -249,14 +220,14 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// A referendum has been submitted. + /// A referendum has being submitted. Submitted { /// Index of the referendum. index: ReferendumIndex, /// The track (and by extension proposal dispatch origin) of this referendum. track: TrackIdOf, - /// The proposal for the referendum. - proposal: BoundedCallOf, + /// The hash of the proposal up for referendum. + proposal_hash: T::Hash, }, /// The decision deposit has been placed. DecisionDepositPlaced { @@ -289,8 +260,8 @@ pub mod pallet { index: ReferendumIndex, /// The track (and by extension proposal dispatch origin) of this referendum. track: TrackIdOf, - /// The proposal for the referendum. - proposal: BoundedCallOf, + /// The hash of the proposal up for referendum. + proposal_hash: T::Hash, /// The current tally of votes in this referendum. tally: T::Tally, }, @@ -352,7 +323,7 @@ pub mod pallet { HasDeposit, /// The track identifier given was invalid. BadTrack, - /// There are already a full complement of referenda in progress for this track. + /// There are already a full complement of referendums in progress for this track. Full, /// The queue of the track is empty. QueueEmpty, @@ -377,7 +348,7 @@ pub mod pallet { /// - `origin`: must be `SubmitOrigin` and the account must have `SubmissionDeposit` funds /// available. 
/// - `proposal_origin`: The origin from which the proposal should be executed. - /// - `proposal`: The proposal. + /// - `proposal_hash`: The hash of the proposal preimage. /// - `enactment_moment`: The moment that the proposal should be enacted. /// /// Emits `Submitted`. @@ -385,7 +356,7 @@ pub mod pallet { pub fn submit( origin: OriginFor, proposal_origin: Box>, - proposal: BoundedCallOf, + proposal_hash: T::Hash, enactment_moment: DispatchTime, ) -> DispatchResult { let who = T::SubmitOrigin::ensure_origin(origin)?; @@ -399,12 +370,11 @@ pub mod pallet { r }); let now = frame_system::Pallet::::block_number(); - let nudge_call = - T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index }))?; + let nudge_call = Call::nudge_referendum { index }; let status = ReferendumStatus { track, origin: *proposal_origin, - proposal: proposal.clone(), + proposal_hash, enactment: enactment_moment, submitted: now, submission_deposit, @@ -416,7 +386,7 @@ pub mod pallet { }; ReferendumInfoFor::::insert(index, ReferendumInfo::Ongoing(status)); - Self::deposit_event(Event::::Submitted { index, track, proposal }); + Self::deposit_event(Event::::Submitted { index, track, proposal_hash }); Ok(()) } @@ -648,8 +618,7 @@ impl, I: 'static> Polling for Pallet { let mut status = ReferendumStatusOf:: { track: class, origin: frame_support::dispatch::RawOrigin::Root.into(), - proposal: T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index })) - .map_err(|_| ())?, + proposal_hash: ::hash_of(&index), enactment: DispatchTime::After(Zero::zero()), submitted: now, submission_deposit: Deposit { who: dummy_account_id, amount: Zero::zero() }, @@ -707,18 +676,18 @@ impl, I: 'static> Pallet { track: &TrackInfoOf, desired: DispatchTime, origin: PalletsOriginOf, - call: BoundedCallOf, + call_hash: T::Hash, ) { let now = frame_system::Pallet::::block_number(); let earliest_allowed = now.saturating_add(track.min_enactment_period); let desired = desired.evaluate(now); let ok = 
T::Scheduler::schedule_named( - (ASSEMBLY_ID, "enactment", index).using_encoded(sp_io::hashing::blake2_256), + (ASSEMBLY_ID, "enactment", index).encode(), DispatchTime::At(desired.max(earliest_allowed)), None, 63, origin, - call, + MaybeHashed::Hash(call_hash), ) .is_ok(); debug_assert!(ok, "LOGIC ERROR: bake_referendum/schedule_named failed"); @@ -726,18 +695,17 @@ impl, I: 'static> Pallet { /// Set an alarm to dispatch `call` at block number `when`. fn set_alarm( - call: BoundedCallOf, + call: impl Into>, when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); - let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) / - (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); + let when = (when + alarm_interval - One::one()) / alarm_interval * alarm_interval; let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, 128u8, frame_system::RawOrigin::Root.into(), - call, + MaybeHashed::Value(call.into()), ) .ok() .map(|x| (when, x)); @@ -774,7 +742,7 @@ impl, I: 'static> Pallet { Self::deposit_event(Event::::DecisionStarted { index, tally: status.tally.clone(), - proposal: status.proposal.clone(), + proposal_hash: status.proposal_hash, track: status.track, }); let confirming = if is_passing { @@ -784,8 +752,7 @@ impl, I: 'static> Pallet { None }; let deciding_status = DecidingStatus { since: now, confirming }; - let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track) - .max(now.saturating_add(One::one())); + let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track); status.deciding = Some(deciding_status); let branch = if is_passing { BeginDecidingBranch::Passing } else { BeginDecidingBranch::Failing }; @@ -841,21 +808,12 @@ impl, I: 'static> Pallet { let alarm_interval = T::AlarmInterval::get().max(One::one()); let when = (next_block + alarm_interval - One::one()) / alarm_interval * 
alarm_interval; - let call = match T::Preimages::bound(CallOf::::from(Call::one_fewer_deciding { - track, - })) { - Ok(c) => c, - Err(_) => { - debug_assert!(false, "Unable to create a bounded call from `one_fewer_deciding`??",); - return - }, - }; let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, 128u8, frame_system::RawOrigin::Root.into(), - call, + MaybeHashed::Value(Call::one_fewer_deciding { track }.into()), ); debug_assert!( maybe_result.is_ok(), @@ -878,18 +836,7 @@ impl, I: 'static> Pallet { if status.alarm.as_ref().map_or(true, |&(when, _)| when != alarm) { // Either no alarm or one that was different Self::ensure_no_alarm(status); - let call = - match T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index })) { - Ok(c) => c, - Err(_) => { - debug_assert!( - false, - "Unable to create a bounded call from `nudge_referendum`??", - ); - return false - }, - }; - status.alarm = Self::set_alarm(call, alarm); + status.alarm = Self::set_alarm(Call::nudge_referendum { index }, alarm); true } else { false @@ -908,7 +855,7 @@ impl, I: 'static> Pallet { /// /// In terms of storage, every call to it is expected to access: /// - The scheduler, either to insert, remove or alter an entry; - /// - `TrackQueue`, which should be a `BoundedVec` with a low limit (8-16); + /// - `TrackQueue`, which should be a `BoundedVec` with a low limit (8-16). /// - `DecidingCount`. /// /// Both of the two storage items will only have as many items as there are different tracks, @@ -1005,8 +952,14 @@ impl, I: 'static> Pallet { // Passed! 
Self::ensure_no_alarm(&mut status); Self::note_one_fewer_deciding(status.track); - let (desired, call) = (status.enactment, status.proposal); - Self::schedule_enactment(index, track, desired, status.origin, call); + let (desired, call_hash) = (status.enactment, status.proposal_hash); + Self::schedule_enactment( + index, + track, + desired, + status.origin, + call_hash, + ); Self::deposit_event(Event::::Confirmed { index, tally: status.tally, diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index c98fbf9a676b1..1a24911603990 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -24,15 +24,14 @@ use frame_support::{ assert_ok, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, - SortedMembers, + PreimageRecipient, SortedMembers, }, - weights::Weight, }; use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, Hash, IdentityLookup}, DispatchResult, Perbill, }; @@ -55,32 +54,31 @@ frame_support::construct_runtime!( // Test that a fitlered call can be dispatched. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &RuntimeCall) -> bool { - !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) +impl Contains for BaseFilter { + fn contains(call: &Call) -> bool { + !matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. })) } } parameter_types! 
{ - pub MaxWeight: Weight = Weight::from_ref_time(2_000_000_000_000); pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(MaxWeight::get()); + frame_system::limits::BlockWeights::simple_max(1_000_000); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -93,31 +91,33 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } impl pallet_preimage::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); type Currency = Balances; type ManagerOrigin = EnsureRoot; + type MaxSize = ConstU32<4096>; type BaseDeposit = (); type ByteDeposit = (); } impl pallet_scheduler::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; - type MaximumWeight = MaxWeight; + type Call = Call; + type MaximumWeight = ConstU64<2_000_000_000_000>; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = ConstU32<100>; type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type Preimages = Preimage; + type PreimageProvider = Preimage; + type NoPreimagePostponement = ConstU64<10>; } impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type MaxLocks = ConstU32<10>; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type 
ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -146,7 +146,7 @@ impl SortedMembers for OneToFive { pub struct TestTracksInfo; impl TracksInfo for TestTracksInfo { type Id = u8; - type RuntimeOrigin = ::PalletsOrigin; + type Origin = ::PalletsOrigin; fn tracks() -> &'static [(Self::Id, TrackInfo)] { static DATA: [(u8, TrackInfo); 2] = [ ( @@ -196,7 +196,7 @@ impl TracksInfo for TestTracksInfo { ]; &DATA[..] } - fn track_for(id: &Self::RuntimeOrigin) -> Result { + fn track_for(id: &Self::Origin) -> Result { if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { match system_origin { frame_system::RawOrigin::Root => Ok(0), @@ -208,12 +208,11 @@ impl TracksInfo for TestTracksInfo { } } } -impl_tracksinfo_get!(TestTracksInfo, u64, u64); impl Config for Test { type WeightInfo = (); - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; + type Call = Call; + type Event = Event; type Scheduler = Scheduler; type Currency = pallet_balances::Pallet; type SubmitOrigin = frame_system::EnsureSigned; @@ -227,7 +226,6 @@ impl Config for Test { type UndecidingTimeout = ConstU64<20>; type AlarmInterval = AlarmInterval; type Tracks = TestTracksInfo; - type Preimages = Preimage; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -291,35 +289,29 @@ impl VoteTally for Tally { let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { ayes, nays } } - - #[cfg(feature = "runtime-benchmarks")] - fn setup(_: Class, _: Perbill) {} } pub fn set_balance_proposal(value: u64) -> Vec { - RuntimeCall::Balances(pallet_balances::Call::set_balance { - who: 42, - new_free: value, - new_reserved: 0, - }) - .encode() + Call::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }) + .encode() } -pub fn set_balance_proposal_bounded(value: u64) -> BoundedCallOf { - let c = RuntimeCall::Balances(pallet_balances::Call::set_balance { +pub fn set_balance_proposal_hash(value: 
u64) -> H256 { + let c = Call::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0, }); - ::bound(c).unwrap() + >::note_preimage(c.encode().try_into().unwrap()); + BlakeTwo256::hash_of(&c) } #[allow(dead_code)] pub fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { Referenda::submit( - RuntimeOrigin::signed(who), + Origin::signed(who), Box::new(frame_system::RawOrigin::Root.into()), - set_balance_proposal_bounded(value), + set_balance_proposal_hash(value), DispatchTime::After(delay), ) } @@ -445,12 +437,12 @@ pub enum RefState { impl RefState { pub fn create(self) -> ReferendumIndex { assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(frame_support::dispatch::RawOrigin::Root.into()), - set_balance_proposal_bounded(1), + set_balance_proposal_hash(1), DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); if matches!(self, RefState::Confirming { immediate: true }) { set_tally(0, 100, 0); } diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index 355ce3021b87f..d5435daf185bd 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -42,14 +42,14 @@ fn basic_happy_path_works() { new_test_ext().execute_with(|| { // #1: submit assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_bounded(1), + set_balance_proposal_hash(1), DispatchTime::At(10), )); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(ReferendumCount::::get(), 1); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); run_to(4); assert_eq!(DecidingCount::::get(0), 0); run_to(5); @@ -63,7 +63,7 @@ fn basic_happy_path_works() { run_to(9); // #8: Should be confirmed & 
ended. assert_eq!(approved_since(0), 9); - assert_ok!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::refund_decision_deposit(Origin::signed(2), 0)); run_to(12); // #9: Should not yet be enacted. assert_eq!(Balances::free_balance(&42), 0); @@ -78,7 +78,7 @@ fn insta_confirm_then_kill_works() { new_test_ext().execute_with(|| { let r = Confirming { immediate: true }.create(); run_to(6); - assert_ok!(Referenda::kill(RuntimeOrigin::root(), r)); + assert_ok!(Referenda::kill(Origin::root(), r)); assert_eq!(killed_since(r), 6); }); } @@ -173,24 +173,24 @@ fn queueing_works() { new_test_ext().execute_with(|| { // Submit a proposal into a track with a queue len of 1. assert_ok!(Referenda::submit( - RuntimeOrigin::signed(5), + Origin::signed(5), Box::new(RawOrigin::Root.into()), - set_balance_proposal_bounded(0), + set_balance_proposal_hash(0), DispatchTime::After(0), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(5), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(5), 0)); run_to(2); // Submit 3 more proposals into the same queue. for i in 1..=4 { assert_ok!(Referenda::submit( - RuntimeOrigin::signed(i), + Origin::signed(i), Box::new(RawOrigin::Root.into()), - set_balance_proposal_bounded(i), + set_balance_proposal_hash(i), DispatchTime::After(0), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(i), i as u32)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(i), i as u32)); // TODO: decision deposit after some initial votes with a non-highest voted coming // first. } @@ -214,7 +214,7 @@ fn queueing_works() { println!("{:?}", Vec::<_>::from(TrackQueue::::get(0))); // Cancel the first. - assert_ok!(Referenda::cancel(RuntimeOrigin::signed(4), 0)); + assert_ok!(Referenda::cancel(Origin::signed(4), 0)); assert_eq!(cancelled_since(0), 6); // The other with the most approvals (#4) should be being decided. 
@@ -270,9 +270,9 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { new_test_ext().execute_with(|| { // #1: submit assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_bounded(1), + set_balance_proposal_hash(1), DispatchTime::At(20), )); run_to(20); @@ -290,20 +290,20 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { fn tracks_are_distinguished() { new_test_ext().execute_with(|| { assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_bounded(1), + set_balance_proposal_hash(1), DispatchTime::At(10), )); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(2), + Origin::signed(2), Box::new(RawOrigin::None.into()), - set_balance_proposal_bounded(2), + set_balance_proposal_hash(2), DispatchTime::At(20), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(3), 0)); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(4), 1)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(3), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(4), 1)); let mut i = ReferendumInfoFor::::iter().collect::>(); i.sort_by_key(|x| x.0); @@ -315,7 +315,7 @@ fn tracks_are_distinguished() { ReferendumInfo::Ongoing(ReferendumStatus { track: 0, origin: OriginCaller::system(RawOrigin::Root), - proposal: set_balance_proposal_bounded(1), + proposal_hash: set_balance_proposal_hash(1), enactment: DispatchTime::At(10), submitted: 1, submission_deposit: Deposit { who: 1, amount: 2 }, @@ -331,7 +331,7 @@ fn tracks_are_distinguished() { ReferendumInfo::Ongoing(ReferendumStatus { track: 1, origin: OriginCaller::system(RawOrigin::None), - proposal: set_balance_proposal_bounded(2), + proposal_hash: set_balance_proposal_hash(2), enactment: DispatchTime::At(20), submitted: 1, submission_deposit: Deposit { who: 2, amount: 2 }, @@ -350,13 +350,13 @@ fn 
tracks_are_distinguished() { #[test] fn submit_errors_work() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); // No track for Signed origins. assert_noop!( Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Signed(2).into()), - h.clone(), + h, DispatchTime::At(10), ), Error::::NoTrack @@ -365,7 +365,7 @@ fn submit_errors_work() { // No funds for deposit assert_noop!( Referenda::submit( - RuntimeOrigin::signed(10), + Origin::signed(10), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), @@ -379,21 +379,21 @@ fn submit_errors_work() { fn decision_deposit_errors_work() { new_test_ext().execute_with(|| { let e = Error::::NotOngoing; - assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); + assert_noop!(Referenda::place_decision_deposit(Origin::signed(2), 0), e); - let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); let e = BalancesError::::InsufficientBalance; - assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(10), 0), e); + assert_noop!(Referenda::place_decision_deposit(Origin::signed(10), 0), e); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); let e = Error::::HasDeposit; - assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); + assert_noop!(Referenda::place_decision_deposit(Origin::signed(2), 0), e); }); } @@ -401,42 +401,42 @@ fn decision_deposit_errors_work() { fn refund_deposit_works() { new_test_ext().execute_with(|| { let e = Error::::BadReferendum; - assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(1), 0), e); + assert_noop!(Referenda::refund_decision_deposit(Origin::signed(1), 0), e); - 
let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); let e = Error::::NoDeposit; - assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(2), 0), e); + assert_noop!(Referenda::refund_decision_deposit(Origin::signed(2), 0), e); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); let e = Error::::Unfinished; - assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0), e); + assert_noop!(Referenda::refund_decision_deposit(Origin::signed(3), 0), e); run_to(11); - assert_ok!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0)); + assert_ok!(Referenda::refund_decision_deposit(Origin::signed(3), 0)); }); } #[test] fn cancel_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); run_to(8); - assert_ok!(Referenda::cancel(RuntimeOrigin::signed(4), 0)); - assert_ok!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0)); + assert_ok!(Referenda::cancel(Origin::signed(4), 0)); + assert_ok!(Referenda::refund_decision_deposit(Origin::signed(3), 0)); assert_eq!(cancelled_since(0), 8); }); } @@ -444,37 +444,37 @@ fn cancel_works() { #[test] fn cancel_errors_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), h, 
DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); - assert_noop!(Referenda::cancel(RuntimeOrigin::signed(1), 0), BadOrigin); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_noop!(Referenda::cancel(Origin::signed(1), 0), BadOrigin); run_to(11); - assert_noop!(Referenda::cancel(RuntimeOrigin::signed(4), 0), Error::::NotOngoing); + assert_noop!(Referenda::cancel(Origin::signed(4), 0), Error::::NotOngoing); }); } #[test] fn kill_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); run_to(8); - assert_ok!(Referenda::kill(RuntimeOrigin::root(), 0)); + assert_ok!(Referenda::kill(Origin::root(), 0)); let e = Error::::NoDeposit; - assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0), e); + assert_noop!(Referenda::refund_decision_deposit(Origin::signed(3), 0), e); assert_eq!(killed_since(0), 8); }); } @@ -482,25 +482,25 @@ fn kill_works() { #[test] fn kill_errors_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_bounded(1); + let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - RuntimeOrigin::signed(1), + Origin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); - assert_noop!(Referenda::kill(RuntimeOrigin::signed(4), 0), BadOrigin); + assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_noop!(Referenda::kill(Origin::signed(4), 0), BadOrigin); run_to(11); - assert_noop!(Referenda::kill(RuntimeOrigin::root(), 0), Error::::NotOngoing); + 
assert_noop!(Referenda::kill(Origin::root(), 0), Error::::NotOngoing); }); } #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { - let call = crate::mock::RuntimeCall::decode(&mut &set_balance_proposal(i)[..]).unwrap(); + let call = crate::mock::Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); assert!(!::BaseCallFilter::contains(&call)); } } diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index 48db0847edf2e..3eba783246e10 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -19,10 +19,7 @@ use super::*; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::{ - traits::{schedule::v3::Anon, Bounded}, - Parameter, -}; +use frame_support::{traits::schedule::Anon, Parameter}; use scale_info::TypeInfo; use sp_arithmetic::{Rounding::*, SignedRounding::*}; use sp_runtime::{FixedI64, PerThing, RuntimeDebug}; @@ -33,17 +30,15 @@ pub type BalanceOf = pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; -pub type CallOf = >::RuntimeCall; -pub type BoundedCallOf = Bounded<>::RuntimeCall>; +pub type CallOf = >::Call; pub type VotesOf = >::Votes; pub type TallyOf = >::Tally; -pub type PalletsOriginOf = - <::RuntimeOrigin as OriginTrait>::PalletsOrigin; +pub type PalletsOriginOf = <::Origin as OriginTrait>::PalletsOrigin; pub type ReferendumInfoOf = ReferendumInfo< TrackIdOf, PalletsOriginOf, ::BlockNumber, - BoundedCallOf, + ::Hash, BalanceOf, TallyOf, ::AccountId, @@ -53,7 +48,7 @@ pub type ReferendumStatusOf = ReferendumStatus< TrackIdOf, PalletsOriginOf, ::BlockNumber, - BoundedCallOf, + ::Hash, BalanceOf, TallyOf, ::AccountId, @@ -144,13 +139,13 @@ pub trait TracksInfo { type Id: Copy + Parameter + Ord + PartialOrd + Send + Sync + 'static + MaxEncodedLen; /// The origin type from which a track is implied. - type RuntimeOrigin; + type Origin; /// Return the array of known tracks and their information. 
fn tracks() -> &'static [(Self::Id, TrackInfo)]; /// Determine the voting track for the given `origin`. - fn track_for(origin: &Self::RuntimeOrigin) -> Result; + fn track_for(origin: &Self::Origin) -> Result; /// Return the track info for track `id`, by default this just looks it up in `Self::tracks()`. fn info(id: Self::Id) -> Option<&'static TrackInfo> { @@ -162,9 +157,9 @@ pub trait TracksInfo { #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ReferendumStatus< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Origin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -173,12 +168,12 @@ pub struct ReferendumStatus< /// The track of this referendum. pub(crate) track: TrackId, /// The origin for this referendum. - pub(crate) origin: RuntimeOrigin, + pub(crate) origin: Origin, /// The hash of the proposal up for referendum. - pub(crate) proposal: Call, + pub(crate) proposal_hash: Hash, /// The time the proposal should be scheduled for enactment. pub(crate) enactment: DispatchTime, - /// The time of submission. Once `UndecidingTimeout` passes, it may be closed by anyone if + /// The time of submission. Once `UndecidingTimeout` passes, it may be closed by anyone if it /// `deciding` is `None`. pub(crate) submitted: Moment, /// The deposit reserved for the submission of this referendum. 
@@ -199,9 +194,9 @@ pub struct ReferendumStatus< #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum ReferendumInfo< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Origin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -209,22 +204,13 @@ pub enum ReferendumInfo< > { /// Referendum has been submitted and is being voted on. Ongoing( - ReferendumStatus< - TrackId, - RuntimeOrigin, - Moment, - Call, - Balance, - Tally, - AccountId, - ScheduleAddress, - >, + ReferendumStatus, ), /// Referendum finished with approval. Submission deposit is held. Approved(Moment, Deposit, Option>), /// Referendum finished with rejection. Submission deposit is held. Rejected(Moment, Deposit, Option>), - /// Referendum finished with cancellation. Submission deposit is held. + /// Referendum finished with cancelation. Submission deposit is held. Cancelled(Moment, Deposit, Option>), /// Referendum finished and was never decided. Submission deposit is held. 
TimedOut(Moment, Deposit, Option>), @@ -234,14 +220,14 @@ pub enum ReferendumInfo< impl< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Origin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, ScheduleAddress: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - > ReferendumInfo + > ReferendumInfo { /// Take the Decision Deposit from `self`, if there is one. Returns an `Err` if `self` is not /// in a valid state for the Decision Deposit to be refunded. diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs index d8609abb9fe80..d48ebb1014d48 100644 --- a/frame/referenda/src/weights.rs +++ b/frame/referenda/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_referenda //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/referenda/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/referenda/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -83,231 +80,205 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - // Minimum execution time: 41_475 nanoseconds. - Weight::from_ref_time(42_153_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (34_640_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - // Minimum execution time: 52_291 nanoseconds. - Weight::from_ref_time(53_147_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (44_290_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - // Minimum execution time: 57_322 nanoseconds. 
- Weight::from_ref_time(58_145_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (49_428_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - // Minimum execution time: 57_170 nanoseconds. - Weight::from_ref_time(58_012_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (50_076_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - // Minimum execution time: 67_805 nanoseconds. - Weight::from_ref_time(68_844_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (55_935_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - // Minimum execution time: 63_408 nanoseconds. 
- Weight::from_ref_time(64_049_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (52_921_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - // Minimum execution time: 36_639 nanoseconds. - Weight::from_ref_time(37_329_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (29_160_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - // Minimum execution time: 42_442 nanoseconds. - Weight::from_ref_time(43_006_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (34_972_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - // Minimum execution time: 74_681 nanoseconds. - Weight::from_ref_time(75_567_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (60_620_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - // Minimum execution time: 14_262 nanoseconds. 
- Weight::from_ref_time(14_504_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (9_615_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - // Minimum execution time: 88_618 nanoseconds. - Weight::from_ref_time(89_443_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (113_077_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - // Minimum execution time: 89_784 nanoseconds. - Weight::from_ref_time(90_619_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (114_376_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - // Minimum execution time: 73_179 nanoseconds. 
- Weight::from_ref_time(74_025_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (43_901_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - // Minimum execution time: 73_168 nanoseconds. - Weight::from_ref_time(73_769_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (43_279_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - // Minimum execution time: 75_027 nanoseconds. - Weight::from_ref_time(76_220_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (45_564_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - // Minimum execution time: 74_815 nanoseconds. 
- Weight::from_ref_time(75_803_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (45_061_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - // Minimum execution time: 31_877 nanoseconds. - Weight::from_ref_time(32_236_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (23_757_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - // Minimum execution time: 33_322 nanoseconds. - Weight::from_ref_time(33_762_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (24_781_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - // Minimum execution time: 25_393 nanoseconds. - Weight::from_ref_time(25_913_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_344_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - // Minimum execution time: 47_114 nanoseconds. 
- Weight::from_ref_time(47_586_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (34_752_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - // Minimum execution time: 48_443 nanoseconds. - Weight::from_ref_time(50_003_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (37_055_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - // Minimum execution time: 44_556 nanoseconds. - Weight::from_ref_time(45_167_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (31_442_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - // Minimum execution time: 45_474 nanoseconds. - Weight::from_ref_time(46_105_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (33_201_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - // Minimum execution time: 42_795 nanoseconds. 
- Weight::from_ref_time(43_123_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (30_047_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - // Minimum execution time: 41_928 nanoseconds. - Weight::from_ref_time(42_272_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (29_195_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - // Minimum execution time: 55_186 nanoseconds. - Weight::from_ref_time(55_714_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (50_119_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - // Minimum execution time: 44_892 nanoseconds. 
- Weight::from_ref_time(45_353_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (32_203_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -317,230 +288,204 @@ impl WeightInfo for () { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - // Minimum execution time: 41_475 nanoseconds. - Weight::from_ref_time(42_153_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (34_640_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - // Minimum execution time: 52_291 nanoseconds. - Weight::from_ref_time(53_147_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (44_290_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - // Minimum execution time: 57_322 nanoseconds. 
- Weight::from_ref_time(58_145_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (49_428_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - // Minimum execution time: 57_170 nanoseconds. - Weight::from_ref_time(58_012_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (50_076_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - // Minimum execution time: 67_805 nanoseconds. - Weight::from_ref_time(68_844_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (55_935_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - // Minimum execution time: 63_408 nanoseconds. 
- Weight::from_ref_time(64_049_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (52_921_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - // Minimum execution time: 36_639 nanoseconds. - Weight::from_ref_time(37_329_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (29_160_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - // Minimum execution time: 42_442 nanoseconds. - Weight::from_ref_time(43_006_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (34_972_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - // Minimum execution time: 74_681 nanoseconds. - Weight::from_ref_time(75_567_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (60_620_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - // Minimum execution time: 14_262 nanoseconds. 
- Weight::from_ref_time(14_504_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (9_615_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - // Minimum execution time: 88_618 nanoseconds. - Weight::from_ref_time(89_443_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (113_077_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - // Minimum execution time: 89_784 nanoseconds. - Weight::from_ref_time(90_619_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (114_376_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - // Minimum execution time: 73_179 nanoseconds. 
- Weight::from_ref_time(74_025_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (43_901_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - // Minimum execution time: 73_168 nanoseconds. - Weight::from_ref_time(73_769_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (43_279_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - // Minimum execution time: 75_027 nanoseconds. - Weight::from_ref_time(76_220_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (45_564_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - // Minimum execution time: 74_815 nanoseconds. 
- Weight::from_ref_time(75_803_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (45_061_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - // Minimum execution time: 31_877 nanoseconds. - Weight::from_ref_time(32_236_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (23_757_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - // Minimum execution time: 33_322 nanoseconds. - Weight::from_ref_time(33_762_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (24_781_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - // Minimum execution time: 25_393 nanoseconds. - Weight::from_ref_time(25_913_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_344_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - // Minimum execution time: 47_114 nanoseconds. 
- Weight::from_ref_time(47_586_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (34_752_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - // Minimum execution time: 48_443 nanoseconds. - Weight::from_ref_time(50_003_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (37_055_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - // Minimum execution time: 44_556 nanoseconds. - Weight::from_ref_time(45_167_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (31_442_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - // Minimum execution time: 45_474 nanoseconds. 
- Weight::from_ref_time(46_105_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (33_201_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - // Minimum execution time: 42_795 nanoseconds. - Weight::from_ref_time(43_123_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (30_047_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - // Minimum execution time: 41_928 nanoseconds. - Weight::from_ref_time(42_272_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (29_195_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - // Minimum execution time: 55_186 nanoseconds. 
- Weight::from_ref_time(55_714_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (50_119_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - // Minimum execution time: 44_892 nanoseconds. - Weight::from_ref_time(45_353_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (32_203_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/remark/Cargo.toml b/frame/remark/Cargo.toml index f644ea723b59f..71a65ce554975 100644 --- a/frame/remark/Cargo.toml +++ b/frame/remark/Cargo.toml @@ -31,7 +31,6 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../primitive default = ["std"] runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", @@ -42,4 +41,3 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = [ "frame-support/try-runtime" ] diff --git a/frame/remark/src/benchmarking.rs b/frame/remark/src/benchmarking.rs index c0db8d5d3d59b..d30a8aa5df07d 100644 --- a/frame/remark/src/benchmarking.rs +++ b/frame/remark/src/benchmarking.rs @@ -27,9 +27,9 @@ use sp_std::*; #[cfg(test)] use crate::Pallet as Remark; -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { let events = System::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); + let system_event: ::Event = generic_event.into(); let EventRecord { event, .. 
} = &events[events.len() - 1]; assert_eq!(event, &system_event); } diff --git a/frame/remark/src/lib.rs b/frame/remark/src/lib.rs index b61c79f7f273d..c69f95907019f 100644 --- a/frame/remark/src/lib.rs +++ b/frame/remark/src/lib.rs @@ -43,7 +43,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } diff --git a/frame/remark/src/mock.rs b/frame/remark/src/mock.rs index 22467796cf37b..67a0399e9c386 100644 --- a/frame/remark/src/mock.rs +++ b/frame/remark/src/mock.rs @@ -45,8 +45,8 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -54,7 +54,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -69,7 +69,7 @@ impl frame_system::Config for Test { } impl pallet_remark::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); } diff --git a/frame/remark/src/tests.rs b/frame/remark/src/tests.rs index 2278e3817b48a..60a376c5afca5 100644 --- a/frame/remark/src/tests.rs +++ b/frame/remark/src/tests.rs @@ -28,16 +28,13 @@ fn generates_event() { let caller = 1; let data = vec![0u8; 100]; System::set_block_number(System::block_number() + 1); //otherwise event won't be registered. 
- assert_ok!(Remark::::store(RawOrigin::Signed(caller).into(), data.clone(),)); + assert_ok!(Remark::::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),)); let events = System::events(); - // this one we create as we expect it - let system_event: ::RuntimeEvent = Event::Stored { + let system_event: ::Event = Event::Stored { content_hash: sp_io::hashing::blake2_256(&data).into(), sender: caller, } .into(); - // this one we actually go into the system pallet and get the last event - // because we know its there from block +1 let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); }); @@ -50,7 +47,7 @@ fn does_not_store_empty() { let data = vec![]; System::set_block_number(System::block_number() + 1); //otherwise event won't be registered. assert_noop!( - Remark::::store(RawOrigin::Signed(caller).into(), data.clone(),), + Remark::::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),), Error::::Empty ); assert!(System::events().is_empty()); diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index 0d739657c852b..b8bd4618f8def 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_remark //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/remark/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/remark/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,25 +51,21 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - /// The range of component `l` is `[1, 1048576]`. fn store(l: u32, ) -> Weight { - // Minimum execution time: 17_017 nanoseconds. - Weight::from_ref_time(8_269_935 as u64) - // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_407 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (13_140_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - /// The range of component `l` is `[1, 1048576]`. fn store(l: u32, ) -> Weight { - // Minimum execution time: 17_017 nanoseconds. 
- Weight::from_ref_time(8_269_935 as u64) - // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_407 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (13_140_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } } diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml deleted file mode 100644 index ea6a6527848aa..0000000000000 --- a/frame/root-offences/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pallet-root-offences" -version = "1.0.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME root offences pallet" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } - -pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } -pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../frame/staking" } -pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../frame/offences" } - -frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } -sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } - -[dev-dependencies] -pallet-balances = { version = "4.0.0-dev", path = "../balances" } -pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } 
-pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } - -sp-core = { version = "6.0.0", path = "../../primitives/core" } -sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } -sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } - -frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } - -[features] -runtime-benchmarks = [] -try-runtime = ["frame-support/try-runtime"] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "pallet-session/std", - "pallet-staking/std", - "pallet-offences/std", - "scale-info/std", - "sp-runtime/std", -] diff --git a/frame/root-offences/README.md b/frame/root-offences/README.md deleted file mode 100644 index a2c5261b6985a..0000000000000 --- a/frame/root-offences/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Sudo Offences Pallet - -Pallet that allows the root to create an offence. - -NOTE: This pallet should only be used for testing purposes. \ No newline at end of file diff --git a/frame/root-offences/src/lib.rs b/frame/root-offences/src/lib.rs deleted file mode 100644 index b4b549627f3fa..0000000000000 --- a/frame/root-offences/src/lib.rs +++ /dev/null @@ -1,131 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//! # Sudo Offences Pallet -//! Pallet that allows the root to create an offence. -//! -//! NOTE: This pallet should be used for testing purposes. - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(test)] -mod mock; -#[cfg(test)] -mod tests; - -use pallet_session::historical::IdentificationTuple; -use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; -use sp_runtime::Perbill; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: - frame_system::Config - + pallet_staking::Config - + pallet_session::Config::AccountId> - + pallet_session::historical::Config< - FullIdentification = Exposure< - ::AccountId, - BalanceOf, - >, - FullIdentificationOf = ExposureOf, - > - { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - } - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// An offence was created by root. - OffenceCreated { offenders: Vec<(T::AccountId, Perbill)> }, - } - - #[pallet::error] - pub enum Error { - /// Failed to get the active era from the staking pallet. - FailedToGetActiveEra, - } - - type OffenceDetails = sp_staking::offence::OffenceDetails< - ::AccountId, - IdentificationTuple, - >; - - #[pallet::call] - impl Pallet { - /// Allows the `root`, for example sudo to create an offence. 
- #[pallet::weight(T::DbWeight::get().reads(2))] - pub fn create_offence( - origin: OriginFor, - offenders: Vec<(T::AccountId, Perbill)>, - ) -> DispatchResult { - ensure_root(origin)?; - - let slash_fraction = - offenders.clone().into_iter().map(|(_, fraction)| fraction).collect::>(); - let offence_details = Self::get_offence_details(offenders.clone())?; - - Self::submit_offence(&offence_details, &slash_fraction); - Self::deposit_event(Event::OffenceCreated { offenders }); - Ok(()) - } - } - - impl Pallet { - /// Returns a vector of offenders that are going to be slashed. - fn get_offence_details( - offenders: Vec<(T::AccountId, Perbill)>, - ) -> Result>, DispatchError> { - let now = Staking::::active_era() - .map(|e| e.index) - .ok_or(Error::::FailedToGetActiveEra)?; - - Ok(offenders - .clone() - .into_iter() - .map(|(o, _)| OffenceDetails:: { - offender: (o.clone(), Staking::::eras_stakers(now, o)), - reporters: vec![], - }) - .collect()) - } - - /// Submits the offence by calling the `on_offence` function. - fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { - let session_index = as frame_support::traits::ValidatorSet>::session_index(); - - as OnOffenceHandler< - T::AccountId, - IdentificationTuple, - Weight, - >>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); - } - } -} diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs deleted file mode 100644 index 65bfcad4b26fc..0000000000000 --- a/frame/root-offences/src/mock.rs +++ /dev/null @@ -1,359 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::*; -use crate as root_offences; - -use frame_election_provider_support::{onchain, SequentialPhragmen}; -use frame_support::{ - parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, Hooks, OneSessionHandler}, -}; -use pallet_staking::StakerStatus; -use sp_core::H256; -use sp_runtime::{ - curve::PiecewiseLinear, - testing::{Header, UintAuthorityId}, - traits::{BlakeTwo256, IdentityLookup, Zero}, -}; -use sp_staking::{EraIndex, SessionIndex}; -use sp_std::collections::btree_map::BTreeMap; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; -type AccountId = u64; -type Balance = u64; -type BlockNumber = u64; - -pub const INIT_TIMESTAMP: u64 = 30_000; -pub const BLOCK_TIME: u64 = 1000; - -frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - RootOffences: root_offences::{Pallet, Call, Storage, Event}, - Historical: pallet_session::historical::{Pallet, Storage}, - } -); - -/// Another session handler struct to test on_disabled. 
-pub struct OtherSessionHandler; -impl OneSessionHandler for OtherSessionHandler { - type Key = UintAuthorityId; - - fn on_genesis_session<'a, I: 'a>(_: I) - where - I: Iterator, - AccountId: 'a, - { - } - - fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) - where - I: Iterator, - AccountId: 'a, - { - } - - fn on_disabled(_validator_index: u32) {} -} - -impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { - type Public = UintAuthorityId; -} - -parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); -} - -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} - -impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type WeightInfo = (); -} - -pallet_staking_reward_curve::build! 
{ - const REWARD_CURVE: PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000u64, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); -} - -pub struct OnChainSeqPhragmen; -impl onchain::Config for OnChainSeqPhragmen { - type System = Test; - type Solver = SequentialPhragmen; - type DataProvider = Staking; - type WeightInfo = (); - type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; -} - -pub struct OnStakerSlashMock(core::marker::PhantomData); -impl sp_staking::OnStakerSlash for OnStakerSlashMock { - fn on_slash( - _pool_account: &AccountId, - slashed_bonded: Balance, - slashed_chunks: &BTreeMap, - ) { - LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); - } -} - -parameter_types! { - pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub static Offset: BlockNumber = 0; - pub const Period: BlockNumber = 1; - pub static SessionsPerEra: SessionIndex = 3; - pub static SlashDeferDuration: EraIndex = 0; - pub const BondingDuration: EraIndex = 3; - pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); -} - -impl pallet_staking::Config for Test { - type MaxNominations = ConstU32<16>; - type Currency = Balances; - type CurrencyBalance = ::Balance; - type UnixTime = Timestamp; - type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = frame_system::EnsureRoot; - type BondingDuration = BondingDuration; - type SessionInterface = Self; - type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = Session; - type 
MaxNominatorRewardedPerValidator = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; - type ElectionProvider = onchain::OnChainExecution; - type GenesisElectionProvider = Self::ElectionProvider; - type TargetList = pallet_staking::UseValidatorsMap; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; - type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type OnStakerSlash = OnStakerSlashMock; - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); -} - -impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; -} - -sp_runtime::impl_opaque_keys! { - pub struct SessionKeys { - pub other: OtherSessionHandler, - } -} - -impl pallet_session::Config for Test { - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type Keys = SessionKeys; - type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionHandler = (OtherSessionHandler,); - type RuntimeEvent = RuntimeEvent; - type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; - type NextSessionRotation = pallet_session::PeriodicSessions; - type WeightInfo = (); -} - -impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = ConstU64<5>; - type WeightInfo = (); -} - -impl Config for Test { - type RuntimeEvent = RuntimeEvent; -} - -pub struct ExtBuilder { - validator_count: u32, - minimum_validator_count: u32, - invulnerables: Vec, - balance_factor: Balance, -} - -impl Default for ExtBuilder { - fn default() -> Self { - Self { - validator_count: 2, - minimum_validator_count: 0, - invulnerables: vec![], - balance_factor: 1, - } - } -} - -impl ExtBuilder { - fn build(self) -> sp_io::TestExternalities { - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - 
pallet_balances::GenesisConfig:: { - balances: vec![ - //controllers - (10, self.balance_factor * 50), - (20, self.balance_factor * 50), - (30, self.balance_factor * 50), - (40, self.balance_factor * 50), - // stashes - (11, self.balance_factor * 1000), - (21, self.balance_factor * 1000), - (31, self.balance_factor * 500), - (41, self.balance_factor * 1000), - ], - } - .assimilate_storage(&mut storage) - .unwrap(); - - let stakers = vec![ - // (stash, ctrl, stake, status) - // these two will be elected in the default test where we elect 2. - (11, 10, 1000, StakerStatus::::Validator), - (21, 20, 1000, StakerStatus::::Validator), - // a loser validator - (31, 30, 500, StakerStatus::::Validator), - // an idle validator - (41, 40, 1000, StakerStatus::::Idle), - ]; - - let _ = pallet_staking::GenesisConfig:: { - stakers: stakers.clone(), - ..Default::default() - }; - - let _ = pallet_staking::GenesisConfig:: { - stakers: stakers.clone(), - validator_count: self.validator_count, - minimum_validator_count: self.minimum_validator_count, - invulnerables: self.invulnerables, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - } - .assimilate_storage(&mut storage); - - let _ = pallet_session::GenesisConfig:: { - keys: stakers - .into_iter() - .map(|(id, ..)| (id, id, SessionKeys { other: id.into() })) - .collect(), - } - .assimilate_storage(&mut storage); - - storage.into() - } - - pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - let mut ext = self.build(); - ext.execute_with(test); - } -} - -/// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. -pub(crate) fn start_session(session_index: SessionIndex) { - let end: u64 = if Offset::get().is_zero() { - (session_index as u64) * Period::get() - } else { - Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get() - }; - run_to_block(end); - // session must have progressed properly. 
- assert_eq!( - Session::current_index(), - session_index, - "current session index = {}, expected = {}", - Session::current_index(), - session_index, - ); -} - -/// Progress to the given block, triggering session and era changes as we progress. -/// -/// This will finalize the previous block, initialize up to the given block, essentially simulating -/// a block import/propose process where we first initialize the block, then execute some stuff (not -/// in the function), and then finalize the block. -pub(crate) fn run_to_block(n: BlockNumber) { - Staking::on_finalize(System::block_number()); - for b in (System::block_number() + 1)..=n { - System::set_block_number(b); - Session::on_initialize(b); - >::on_initialize(b); - Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); - if b != n { - Staking::on_finalize(System::block_number()); - } - } -} - -pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index -} diff --git a/frame/root-offences/src/tests.rs b/frame/root-offences/src/tests.rs deleted file mode 100644 index a8b7d0a6d6aca..0000000000000 --- a/frame/root-offences/src/tests.rs +++ /dev/null @@ -1,94 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use super::*; -use frame_support::{assert_err, assert_ok}; -use mock::{active_era, start_session, Balances, ExtBuilder, RootOffences, RuntimeOrigin, System}; - -#[test] -fn create_offence_fails_given_signed_origin() { - use sp_runtime::traits::BadOrigin; - ExtBuilder::default().build_and_execute(|| { - let offenders = (&[]).to_vec(); - assert_err!(RootOffences::create_offence(RuntimeOrigin::signed(1), offenders), BadOrigin); - }) -} - -#[test] -fn create_offence_works_given_root_origin() { - ExtBuilder::default().build_and_execute(|| { - start_session(1); - - assert_eq!(active_era(), 0); - - assert_eq!(Balances::free_balance(11), 1000); - - let offenders = [(11, Perbill::from_percent(50))].to_vec(); - assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); - - System::assert_last_event(Event::OffenceCreated { offenders }.into()); - // the slash should be applied right away. - assert_eq!(Balances::free_balance(11), 500); - - // the other validator should keep his balance, because we only created - // an offences for the first validator. - assert_eq!(Balances::free_balance(21), 1000); - }) -} - -#[test] -fn create_offence_wont_slash_non_active_validators() { - ExtBuilder::default().build_and_execute(|| { - start_session(1); - - assert_eq!(active_era(), 0); - - // 31 is not an active validator. - assert_eq!(Balances::free_balance(31), 500); - - let offenders = [(31, Perbill::from_percent(20)), (11, Perbill::from_percent(20))].to_vec(); - assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); - - System::assert_last_event(Event::OffenceCreated { offenders }.into()); - - // so 31 didn't get slashed. - assert_eq!(Balances::free_balance(31), 500); - - // but 11 is an active validator so he got slashed. 
- assert_eq!(Balances::free_balance(11), 800); - }) -} - -#[test] -fn create_offence_wont_slash_idle() { - ExtBuilder::default().build_and_execute(|| { - start_session(1); - - assert_eq!(active_era(), 0); - - // 41 is idle. - assert_eq!(Balances::free_balance(41), 1000); - - let offenders = [(41, Perbill::from_percent(50))].to_vec(); - assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); - - System::assert_last_event(Event::OffenceCreated { offenders }.into()); - - // 41 didn't get slashed. - assert_eq!(Balances::free_balance(41), 1000); - }) -} diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index e78d8cd5061c1..d92d0df0c8037 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -34,7 +34,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index e621c913b2386..9c97bc5973384 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -18,214 +18,198 @@ //! Scheduler pallet benchmarking. use super::*; -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::benchmarks; use frame_support::{ ensure, - traits::{schedule::Priority, BoundedInline}, + traits::{OnInitialize, PreimageProvider, PreimageRecipient}, }; use frame_system::RawOrigin; +use sp_runtime::traits::Hash; use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; -use frame_system::Call as SystemCall; - -const SEED: u32 = 0; +use frame_system::Pallet as System; const BLOCK_NUMBER: u32 = 2; -type SystemOrigin = ::RuntimeOrigin; - -/// Add `n` items to the schedule. +/// Add `n` named items to the schedule. 
/// /// For `resolved`: -/// - ` /// - `None`: aborted (hash without preimage) /// - `Some(true)`: hash resolves into call if possible, plain call otherwise /// - `Some(false)`: plain call -fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { - let t = DispatchTime::At(when); - let origin: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); - for i in 0..n { - let call = make_call::(None); - let period = Some(((i + 100).into(), 100)); - let name = u32_to_name(i); - Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; - } - ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); - Ok(()) -} - -fn u32_to_name(i: u32) -> TaskName { - i.using_encoded(blake2_256) -} - -fn make_task( +fn fill_schedule( + when: T::BlockNumber, + n: u32, periodic: bool, named: bool, - signed: bool, - maybe_lookup_len: Option, - priority: Priority, -) -> ScheduledOf { - let call = make_call::(maybe_lookup_len); - let maybe_periodic = match periodic { - true => Some((100u32.into(), 100)), - false => None, - }; - let maybe_id = match named { - true => Some(u32_to_name(0)), - false => None, - }; - let origin = make_origin::(signed); - Scheduled { maybe_id, priority, call, maybe_periodic, origin, _phantom: PhantomData } -} - -fn bounded(len: u32) -> Option::RuntimeCall>> { - let call = - <::RuntimeCall>::from(SystemCall::remark { remark: vec![0; len as usize] }); - T::Preimages::bound(call).ok() -} - -fn make_call(maybe_lookup_len: Option) -> Bounded<::RuntimeCall> { - let bound = BoundedInline::bound() as u32; - let mut len = match maybe_lookup_len { - Some(len) => len.min(T::Preimages::MAX_LENGTH as u32 - 2).max(bound) - 3, - None => bound.saturating_sub(4), - }; - - loop { - let c = match bounded::(len) { - Some(x) => x, - None => { - len -= 1; - continue + resolved: Option, +) -> Result<(), &'static str> { + for i in 0..n { + // Named schedule is strictly heavier than anonymous + let (call, hash) = call_and_hash::(i); + let 
call_or_hash = match resolved { + Some(true) => { + T::PreimageProvider::note_preimage(call.encode().try_into().unwrap()); + if T::PreimageProvider::have_preimage(&hash) { + CallOrHashOf::::Hash(hash) + } else { + call.into() + } }, + Some(false) => call.into(), + None => CallOrHashOf::::Hash(hash), }; - if c.lookup_needed() == maybe_lookup_len.is_some() { - break c - } - if maybe_lookup_len.is_some() { - len += 1; + let period = match periodic { + true => Some(((i + 100).into(), 100)), + false => None, + }; + let t = DispatchTime::At(when); + let origin = frame_system::RawOrigin::Root.into(); + if named { + Scheduler::::do_schedule_named(i.encode(), t, period, 0, origin, call_or_hash)?; } else { - if len > 0 { - len -= 1; - } else { - break c - } + Scheduler::::do_schedule(t, period, 0, origin, call_or_hash)?; } } + ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); + Ok(()) } -fn make_origin(signed: bool) -> ::PalletsOrigin { - match signed { - true => frame_system::RawOrigin::Signed(account("origin", 0, SEED)).into(), - false => frame_system::RawOrigin::Root.into(), - } +fn call_and_hash(i: u32) -> (::Call, T::Hash) { + // Essentially a no-op call. + let call: ::Call = frame_system::Call::remark { remark: i.encode() }.into(); + let hash = T::Hashing::hash_of(&call); + (call, hash) } benchmarks! { - // `service_agendas` when no work is done. - service_agendas_base { - let now = T::BlockNumber::from(BLOCK_NUMBER); - IncompleteSince::::put(now - One::one()); - }: { - Scheduler::::service_agendas(&mut WeightMeter::max_limit(), now, 0); - } verify { - assert_eq!(IncompleteSince::::get(), Some(now - One::one())); + on_initialize_periodic_named_resolved { + let s in 1 .. 
T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, true, true, Some(true))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), s * 2); + for i in 0..s { + assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); + } } - // `service_agenda` when no work is done. - service_agenda_base { - let now = BLOCK_NUMBER.into(); - let s in 0 .. T::MaxScheduledPerBlock::get(); - fill_schedule::(now, s)?; - let mut executed = 0; - }: { - Scheduler::::service_agenda(&mut WeightMeter::max_limit(), &mut executed, now, now, 0); - } verify { - assert_eq!(executed, 0); + on_initialize_named_resolved { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, false, true, Some(true))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), s * 2); + assert!(Agenda::::iter().count() == 0); } - // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not - // dispatched (e.g. due to being overweight). - service_task_base { - let now = BLOCK_NUMBER.into(); - let task = make_task::(false, false, false, None, 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightMeter::from_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - //assert_eq!(result, Ok(())); + on_initialize_periodic_resolved { + let s in 1 .. 
T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, true, false, Some(true))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), s * 2); + for i in 0..s { + assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); + } } - // `service_task` when the task is a non-periodic, non-named, fetched call (with a known - // preimage length) and which is not dispatched (e.g. due to being overweight). - service_task_fetched { - let s in (BoundedInline::bound() as u32) .. (T::Preimages::MAX_LENGTH as u32); - let now = BLOCK_NUMBER.into(); - let task = make_task::(false, false, false, Some(s), 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightMeter::from_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + on_initialize_resolved { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, false, false, Some(true))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), s * 2); + assert!(Agenda::::iter().count() == 0); } - // `service_task` when the task is a non-periodic, named, non-fetched call which is not - // dispatched (e.g. due to being overweight). - service_task_named { - let now = BLOCK_NUMBER.into(); - let task = make_task::(false, true, false, None, 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightMeter::from_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + on_initialize_named_aborted { + let s in 1 .. 
T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, false, true, None)?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), 0); + if let Some(delay) = T::NoPreimagePostponement::get() { + assert_eq!(Agenda::::get(when + delay).len(), s as usize); + } else { + assert!(Agenda::::iter().count() == 0); + } } - // `service_task` when the task is a periodic, non-named, non-fetched call which is not - // dispatched (e.g. due to being overweight). - service_task_periodic { - let now = BLOCK_NUMBER.into(); - let task = make_task::(true, false, false, None, 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightMeter::from_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + on_initialize_aborted { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, false, false, None)?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), 0); + if let Some(delay) = T::NoPreimagePostponement::get() { + assert_eq!(Agenda::::get(when + delay).len(), s as usize); + } else { + assert!(Agenda::::iter().count() == 0); + } } - // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. - execute_dispatch_signed { - let mut counter = WeightMeter::max_limit(); - let origin = make_origin::(true); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); + on_initialize_periodic_named { + let s in 1 .. 
T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, true, true, Some(false))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), s); + for i in 0..s { + assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); + } } + + on_initialize_periodic { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, true, false, Some(false))?; + }: { Scheduler::::on_initialize(when); } verify { + assert_eq!(System::::event_count(), s); + for i in 0..s { + assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); + } } - // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. - execute_dispatch_unsigned { - let mut counter = WeightMeter::max_limit(); - let origin = make_origin::(false); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); + on_initialize_named { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, false, true, Some(false))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } + verify { + assert_eq!(System::::event_count(), s); + assert!(Agenda::::iter().count() == 0); } + + on_initialize { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + fill_schedule::(when, s, false, false, Some(false))?; + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } verify { + assert_eq!(System::::event_count(), s); + assert!(Agenda::::iter().count() == 0); } schedule { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); + let s in 0 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. 
- let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); + let inner_call = frame_system::Call::set_storage { items: vec![] }.into(); + let call = Box::new(CallOrHashOf::::Value(inner_call)); - fill_schedule::(when, s)?; + fill_schedule::(when, s, true, true, Some(false))?; }: _(RawOrigin::Root, when, periodic, priority, call) verify { ensure!( @@ -238,13 +222,12 @@ benchmarks! { let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s)?; + fill_schedule::(when, s, true, true, Some(false))?; assert_eq!(Agenda::::get(when).len(), s as usize); - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, when, 0) + }: _(RawOrigin::Root, when, 0) verify { ensure!( - Lookup::::get(u32_to_name(0)).is_none(), + Lookup::::get(0.encode()).is_none(), "didn't remove from lookup" ); // Removed schedule is NONE @@ -255,15 +238,16 @@ benchmarks! { } schedule_named { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); - let id = u32_to_name(s); + let s in 0 .. T::MaxScheduledPerBlock::get(); + let id = s.encode(); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); + let inner_call = frame_system::Call::set_storage { items: vec![] }.into(); + let call = Box::new(CallOrHashOf::::Value(inner_call)); - fill_schedule::(when, s)?; + fill_schedule::(when, s, true, true, Some(false))?; }: _(RawOrigin::Root, id, when, periodic, priority, call) verify { ensure!( @@ -276,11 +260,11 @@ benchmarks! { let s in 1 .. 
T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s)?; - }: _(RawOrigin::Root, u32_to_name(0)) + fill_schedule::(when, s, true, true, Some(false))?; + }: _(RawOrigin::Root, 0.encode()) verify { ensure!( - Lookup::::get(u32_to_name(0)).is_none(), + Lookup::::get(0.encode()).is_none(), "didn't remove from lookup" ); // Removed schedule is NONE diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 78533540be98f..a005c051a1abc 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -52,33 +52,27 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] mod tests; pub mod weights; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Codec, Decode, Encode}; use frame_support::{ - dispatch::{ - DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter, RawOrigin, - }, - ensure, + dispatch::{DispatchError, DispatchResult, Dispatchable, Parameter}, traits::{ schedule::{self, DispatchTime, MaybeHashed}, - Bounded, CallerTrait, EnsureOrigin, Get, Hash as PreimageHash, IsType, OriginTrait, - PalletInfoAccess, PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, + EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess, PrivilegeCmp, StorageVersion, }, - weights::{Weight, WeightMeter}, + weights::{GetDispatchInfo, Weight}, }; -use frame_system::{self as system}; +use frame_system::{self as system, ensure_signed}; pub use pallet::*; use scale_info::TypeInfo; -use sp_io::hashing::blake2_256; use sp_runtime::{ traits::{BadOrigin, One, Saturating, Zero}, - BoundedVec, RuntimeDebug, + RuntimeDebug, }; use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; pub use weights::WeightInfo; @@ -88,8 +82,7 @@ pub type PeriodicIndex = u32; /// The location of a scheduled task that can be used to remove it. 
pub type TaskAddress = (BlockNumber, u32); -pub type CallOrHashOf = - MaybeHashed<::RuntimeCall, ::Hash>; +pub type CallOrHashOf = MaybeHashed<::Call, ::Hash>; #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] #[derive(Clone, RuntimeDebug, Encode, Decode)] @@ -102,61 +95,82 @@ struct ScheduledV1 { /// Information regarding an item to be executed in the future. #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] -#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct Scheduled { +#[derive(Clone, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct ScheduledV3 { /// The unique identity for this task, if there is one. - maybe_id: Option, + maybe_id: Option>, /// This task's priority. priority: schedule::Priority, /// The call to be dispatched. call: Call, /// If the call is periodic, then this points to the information concerning that. maybe_periodic: Option>, - /// The origin with which to dispatch the call. + /// The origin to dispatch the call. origin: PalletsOrigin, _phantom: PhantomData, } -use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2}; +use crate::ScheduledV3 as ScheduledV2; -pub type ScheduledV2Of = ScheduledV2< - Vec, - ::RuntimeCall, +pub type ScheduledV2Of = ScheduledV3< + ::Call, ::BlockNumber, ::PalletsOrigin, ::AccountId, >; pub type ScheduledV3Of = ScheduledV3< - Vec, CallOrHashOf, ::BlockNumber, ::PalletsOrigin, ::AccountId, >; -pub type ScheduledOf = Scheduled< - TaskName, - Bounded<::RuntimeCall>, - ::BlockNumber, - ::PalletsOrigin, - ::AccountId, ->; +pub type ScheduledOf = ScheduledV3Of; + +/// The current version of Scheduled struct. 
+pub type Scheduled = + ScheduledV2; + +#[cfg(feature = "runtime-benchmarks")] +mod preimage_provider { + use frame_support::traits::PreimageRecipient; + pub trait PreimageProviderAndMaybeRecipient: PreimageRecipient {} + impl> PreimageProviderAndMaybeRecipient for T {} +} + +#[cfg(not(feature = "runtime-benchmarks"))] +mod preimage_provider { + use frame_support::traits::PreimageProvider; + pub trait PreimageProviderAndMaybeRecipient: PreimageProvider {} + impl> PreimageProviderAndMaybeRecipient for T {} +} + +pub use preimage_provider::PreimageProviderAndMaybeRecipient; pub(crate) trait MarginalWeightInfo: WeightInfo { - fn service_task(maybe_lookup_len: Option, named: bool, periodic: bool) -> Weight { - let base = Self::service_task_base(); - let mut total = match maybe_lookup_len { - None => base, - Some(l) => Self::service_task_fetched(l as u32), - }; - if named { - total.saturating_accrue(Self::service_task_named().saturating_sub(base)); + fn item(periodic: bool, named: bool, resolved: Option) -> Weight { + match (periodic, named, resolved) { + (_, false, None) => Self::on_initialize_aborted(2) - Self::on_initialize_aborted(1), + (_, true, None) => + Self::on_initialize_named_aborted(2) - Self::on_initialize_named_aborted(1), + (false, false, Some(false)) => Self::on_initialize(2) - Self::on_initialize(1), + (false, true, Some(false)) => + Self::on_initialize_named(2) - Self::on_initialize_named(1), + (true, false, Some(false)) => + Self::on_initialize_periodic(2) - Self::on_initialize_periodic(1), + (true, true, Some(false)) => + Self::on_initialize_periodic_named(2) - Self::on_initialize_periodic_named(1), + (false, false, Some(true)) => + Self::on_initialize_resolved(2) - Self::on_initialize_resolved(1), + (false, true, Some(true)) => + Self::on_initialize_named_resolved(2) - Self::on_initialize_named_resolved(1), + (true, false, Some(true)) => + Self::on_initialize_periodic_resolved(2) - Self::on_initialize_periodic_resolved(1), + (true, true, Some(true)) 
=> + Self::on_initialize_periodic_named_resolved(2) - + Self::on_initialize_periodic_named_resolved(1), } - if periodic { - total.saturating_accrue(Self::service_task_periodic().saturating_sub(base)); - } - total } } impl MarginalWeightInfo for T {} @@ -164,7 +178,11 @@ impl MarginalWeightInfo for T {} #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; + use frame_support::{ + dispatch::PostDispatchInfo, + pallet_prelude::*, + traits::{schedule::LookupError, PreimageProvider}, + }; use frame_system::pallet_prelude::*; /// The current storage version. @@ -173,38 +191,36 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::without_storage_info] pub struct Pallet(_); /// `system::Config` should always be included in our implied traits. #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The aggregated origin which the dispatch will take. - type RuntimeOrigin: OriginTrait + type Origin: OriginTrait + From - + IsType<::RuntimeOrigin>; + + IsType<::Origin>; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: From> - + CallerTrait - + MaxEncodedLen; + type PalletsOrigin: From> + Codec + Clone + Eq + TypeInfo; /// The aggregated call type. - type RuntimeCall: Parameter - + Dispatchable< - RuntimeOrigin = ::RuntimeOrigin, - PostInfo = PostDispatchInfo, - > + GetDispatchInfo + type Call: Parameter + + Dispatchable::Origin, PostInfo = PostDispatchInfo> + + GetDispatchInfo + From>; - /// The maximum weight that may be scheduled per block for any dispatchables. + /// The maximum weight that may be scheduled per block for any dispatchables of less + /// priority than `schedule::HARD_DEADLINE`. 
#[pallet::constant] type MaximumWeight: Get; /// Required origin to schedule or cancel calls. - type ScheduleOrigin: EnsureOrigin<::RuntimeOrigin>; + type ScheduleOrigin: EnsureOrigin<::Origin>; /// Compare the privileges of origins. /// @@ -216,6 +232,7 @@ pub mod pallet { type OriginPrivilegeCmp: PrivilegeCmp; /// The maximum number of scheduled calls in the queue for a single block. + /// Not strictly enforced, but used for weight estimation. #[pallet::constant] type MaxScheduledPerBlock: Get; @@ -223,29 +240,21 @@ pub mod pallet { type WeightInfo: WeightInfo; /// The preimage provider with which we look up call hashes to get the call. - type Preimages: QueryPreimage + StorePreimage; - } + type PreimageProvider: PreimageProviderAndMaybeRecipient; - #[pallet::storage] - pub type IncompleteSince = StorageValue<_, T::BlockNumber>; + /// If `Some` then the number of blocks to postpone execution for when the item is delayed. + type NoPreimagePostponement: Get>; + } /// Items to be executed, indexed by the block number that they should be executed on. #[pallet::storage] - pub type Agenda = StorageMap< - _, - Twox64Concat, - T::BlockNumber, - BoundedVec>, T::MaxScheduledPerBlock>, - ValueQuery, - >; - - /// Lookup from a name to the block number and index of the task. - /// - /// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4 - /// identities. + pub type Agenda = + StorageMap<_, Twox64Concat, T::BlockNumber, Vec>>, ValueQuery>; + + /// Lookup from identity to the block number and index of the task. #[pallet::storage] pub(crate) type Lookup = - StorageMap<_, Twox64Concat, TaskName, TaskAddress>; + StorageMap<_, Twox64Concat, Vec, TaskAddress>; /// Events type. #[pallet::event] @@ -258,15 +267,15 @@ pub mod pallet { /// Dispatched some task. Dispatched { task: TaskAddress, - id: Option, + id: Option>, result: DispatchResult, }, /// The call for the provided hash was not found so the task has been aborted. 
- CallUnavailable { task: TaskAddress, id: Option }, - /// The given task was unable to be renewed since the agenda is full at that block. - PeriodicFailed { task: TaskAddress, id: Option }, - /// The given task can never be executed since it is overweight. - PermanentlyOverweight { task: TaskAddress, id: Option }, + CallLookupFailed { + task: TaskAddress, + id: Option>, + error: LookupError, + }, } #[pallet::error] @@ -279,17 +288,133 @@ pub mod pallet { TargetBlockNumberInPast, /// Reschedule failed because it does not change scheduled time. RescheduleNoChange, - /// Attempt to use a non-named function on a named task. - Named, } #[pallet::hooks] impl Hooks> for Pallet { /// Execute the scheduled calls fn on_initialize(now: T::BlockNumber) -> Weight { - let mut weight_counter = WeightMeter::from_limit(T::MaximumWeight::get()); - Self::service_agendas(&mut weight_counter, now, u32::max_value()); - weight_counter.consumed + let limit = T::MaximumWeight::get(); + + let mut queued = Agenda::::take(now) + .into_iter() + .enumerate() + .filter_map(|(index, s)| Some((index as u32, s?))) + .collect::>(); + + if queued.len() as u32 > T::MaxScheduledPerBlock::get() { + log::warn!( + target: "runtime::scheduler", + "Warning: This block has more items queued in Scheduler than \ + expected from the runtime configuration. An update might be needed." 
+ ); + } + + queued.sort_by_key(|(_, s)| s.priority); + + let next = now + One::one(); + + let mut total_weight: Weight = T::WeightInfo::on_initialize(0); + for (order, (index, mut s)) in queued.into_iter().enumerate() { + let named = if let Some(ref id) = s.maybe_id { + Lookup::::remove(id); + true + } else { + false + }; + + let (call, maybe_completed) = s.call.resolved::(); + s.call = call; + + let resolved = if let Some(completed) = maybe_completed { + T::PreimageProvider::unrequest_preimage(&completed); + true + } else { + false + }; + + let call = match s.call.as_value().cloned() { + Some(c) => c, + None => { + // Preimage not available - postpone until some block. + total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); + if let Some(delay) = T::NoPreimagePostponement::get() { + let until = now.saturating_add(delay); + if let Some(ref id) = s.maybe_id { + let index = Agenda::::decode_len(until).unwrap_or(0); + Lookup::::insert(id, (until, index as u32)); + } + Agenda::::append(until, Some(s)); + } + continue + }, + }; + + let periodic = s.maybe_periodic.is_some(); + let call_weight = call.get_dispatch_info().weight; + let mut item_weight = T::WeightInfo::item(periodic, named, Some(resolved)); + let origin = + <::Origin as From>::from(s.origin.clone()) + .into(); + if ensure_signed(origin).is_ok() { + // Weights of Signed dispatches expect their signing account to be whitelisted. + item_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // We allow a scheduled call if any is true: + // - It's priority is `HARD_DEADLINE` + // - It does not push the weight past the limit. + // - It is the first item in the schedule + let hard_deadline = s.priority <= schedule::HARD_DEADLINE; + let test_weight = + total_weight.saturating_add(call_weight).saturating_add(item_weight); + if !hard_deadline && order > 0 && test_weight > limit { + // Cannot be scheduled this block - postpone until next. 
+ total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); + if let Some(ref id) = s.maybe_id { + // NOTE: We could reasonably not do this (in which case there would be one + // block where the named and delayed item could not be referenced by name), + // but we will do it anyway since it should be mostly free in terms of + // weight and it is slightly cleaner. + let index = Agenda::::decode_len(next).unwrap_or(0); + Lookup::::insert(id, (next, index as u32)); + } + Agenda::::append(next, Some(s)); + continue + } + + let dispatch_origin = s.origin.clone().into(); + let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { + Ok(post_info) => (post_info.actual_weight, Ok(())), + Err(error_and_info) => + (error_and_info.post_info.actual_weight, Err(error_and_info.error)), + }; + let actual_call_weight = maybe_actual_call_weight.unwrap_or(call_weight); + total_weight.saturating_accrue(item_weight); + total_weight.saturating_accrue(actual_call_weight); + + Self::deposit_event(Event::Dispatched { + task: (now, index), + id: s.maybe_id.clone(), + result, + }); + + if let &Some((period, count)) = &s.maybe_periodic { + if count > 1 { + s.maybe_periodic = Some((period, count - 1)); + } else { + s.maybe_periodic = None; + } + let wake = now + period; + // If scheduled is named, place its information in `Lookup` + if let Some(ref id) = s.maybe_id { + let wake_index = Agenda::::decode_len(wake).unwrap_or(0); + Lookup::::insert(id, (wake, wake_index as u32)); + } + Agenda::::append(wake, Some(s)); + } + } + total_weight } } @@ -302,16 +427,16 @@ pub mod pallet { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::RuntimeCall>, + call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule( DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), - 
T::Preimages::bound(*call)?, + *call, )?; Ok(()) } @@ -320,7 +445,7 @@ pub mod pallet { #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; Ok(()) } @@ -329,30 +454,30 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named( origin: OriginFor, - id: TaskName, + id: Vec, when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::RuntimeCall>, + call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( id, DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), - T::Preimages::bound(*call)?, + *call, )?; Ok(()) } /// Cancel a named scheduled task. 
#[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] - pub fn cancel_named(origin: OriginFor, id: TaskName) -> DispatchResult { + pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; Ok(()) } @@ -368,16 +493,16 @@ pub mod pallet { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::RuntimeCall>, + call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule( DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), - T::Preimages::bound(*call)?, + *call, )?; Ok(()) } @@ -390,85 +515,55 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named_after( origin: OriginFor, - id: TaskName, + id: Vec, after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::RuntimeCall>, + call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( id, DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), - T::Preimages::bound(*call)?, + *call, )?; Ok(()) } } } -impl> Pallet { - /// Migrate storage format from V1 to V4. +impl Pallet { + /// Migrate storage format from V1 to V3. /// /// Returns the weight consumed by this migration. - pub fn migrate_v1_to_v4() -> Weight { - use migration::v1 as old; + pub fn migrate_v1_to_v3() -> Weight { let mut weight = T::DbWeight::get().reads_writes(1, 1); - // Delete all undecodable values. - // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. 
- let keys = old::Agenda::::iter_keys().collect::>(); - for key in keys { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if let Err(_) = old::Agenda::::try_get(&key) { - weight.saturating_accrue(T::DbWeight::get().writes(1)); - old::Agenda::::remove(&key); - log::warn!("Deleted undecodable agenda"); - } - } + Agenda::::translate::::Call, T::BlockNumber>>>, _>( + |_, agenda| { + Some( + agenda + .into_iter() + .map(|schedule| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - Agenda::::translate::< - Vec::RuntimeCall, T::BlockNumber>>>, - _, - >(|_, agenda| { - Some(BoundedVec::truncate_from( - agenda - .into_iter() - .map(|schedule| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - schedule.and_then(|schedule| { - if let Some(id) = schedule.maybe_id.as_ref() { - let name = blake2_256(id); - if let Some(item) = old::Lookup::::take(id) { - Lookup::::insert(name, item); - } - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - - let call = T::Preimages::bound(schedule.call).ok()?; - - if call.lookup_needed() { - weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); - } - - Some(Scheduled { - maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + schedule.map(|schedule| ScheduledV3 { + maybe_id: schedule.maybe_id, priority: schedule.priority, - call, + call: schedule.call.into(), maybe_periodic: schedule.maybe_periodic, origin: system::RawOrigin::Root.into(), _phantom: Default::default(), }) }) - }) - .collect::>(), - )) - }); + .collect::>(), + ) + }, + ); #[allow(deprecated)] frame_support::storage::migration::remove_storage_prefix( @@ -477,62 +572,34 @@ impl> Pallet { &[], ); - StorageVersion::new(4).put::(); + StorageVersion::new(3).put::(); weight + T::DbWeight::get().writes(2) } - /// Migrate storage format from V2 to V4. + /// Migrate storage format from V2 to V3. /// /// Returns the weight consumed by this migration. 
- pub fn migrate_v2_to_v4() -> Weight { - use migration::v2 as old; + pub fn migrate_v2_to_v3() -> Weight { let mut weight = T::DbWeight::get().reads_writes(1, 1); - // Delete all undecodable values. - // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. - let keys = old::Agenda::::iter_keys().collect::>(); - for key in keys { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if let Err(_) = old::Agenda::::try_get(&key) { - weight.saturating_accrue(T::DbWeight::get().writes(1)); - old::Agenda::::remove(&key); - log::warn!("Deleted undecodable agenda"); - } - } - Agenda::::translate::>>, _>(|_, agenda| { - Some(BoundedVec::truncate_from( + Some( agenda .into_iter() .map(|schedule| { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - schedule.and_then(|schedule| { - if let Some(id) = schedule.maybe_id.as_ref() { - let name = blake2_256(id); - if let Some(item) = old::Lookup::::take(id) { - Lookup::::insert(name, item); - } - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - - let call = T::Preimages::bound(schedule.call).ok()?; - if call.lookup_needed() { - weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); - } - - Some(Scheduled { - maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), - priority: schedule.priority, - call, - maybe_periodic: schedule.maybe_periodic, - origin: schedule.origin, - _phantom: Default::default(), - }) + schedule.map(|schedule| ScheduledV3 { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call.into(), + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin, + _phantom: Default::default(), }) }) .collect::>(), - )) + ) }); #[allow(deprecated)] @@ -542,140 +609,34 @@ impl> Pallet { &[], ); - StorageVersion::new(4).put::(); + StorageVersion::new(3).put::(); weight + T::DbWeight::get().writes(2) } - /// Migrate storage format from V3 to V4. - /// - /// Returns the weight consumed by this migration. 
- #[allow(deprecated)] - pub fn migrate_v3_to_v4() -> Weight { - use migration::v3 as old; - let mut weight = T::DbWeight::get().reads_writes(2, 1); - - // Delete all undecodable values. - // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. - let blocks = old::Agenda::::iter_keys().collect::>(); - for block in blocks { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if let Err(_) = old::Agenda::::try_get(&block) { - weight.saturating_accrue(T::DbWeight::get().writes(1)); - old::Agenda::::remove(&block); - log::warn!("Deleted undecodable agenda of block: {:?}", block); - } - } - - Agenda::::translate::>>, _>(|block, agenda| { - log::info!("Migrating agenda of block: {:?}", &block); - Some(BoundedVec::truncate_from( - agenda - .into_iter() - .map(|schedule| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - schedule - .and_then(|schedule| { - if let Some(id) = schedule.maybe_id.as_ref() { - let name = blake2_256(id); - if let Some(item) = old::Lookup::::take(id) { - Lookup::::insert(name, item); - log::info!("Migrated name for id: {:?}", id); - } else { - log::error!("No name in Lookup for id: {:?}", &id); - } - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } else { - log::info!("Schedule is unnamed"); - } - - let call = match schedule.call { - MaybeHashed::Hash(h) => { - let bounded = Bounded::from_legacy_hash(h); - // Check that the call can be decoded in the new runtime. 
- if let Err(err) = T::Preimages::peek::< - ::RuntimeCall, - >(&bounded) - { - log::error!( - "Dropping undecodable call {}: {:?}", - &h, - &err - ); - return None - } - weight.saturating_accrue(T::DbWeight::get().reads(1)); - log::info!("Migrated call by hash, hash: {:?}", h); - bounded - }, - MaybeHashed::Value(v) => { - let call = T::Preimages::bound(v) - .map_err(|e| { - log::error!("Could not bound Call: {:?}", e) - }) - .ok()?; - if call.lookup_needed() { - weight.saturating_accrue( - T::DbWeight::get().reads_writes(0, 1), - ); - } - log::info!( - "Migrated call by value, hash: {:?}", - call.hash() - ); - call - }, - }; - - Some(Scheduled { - maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), - priority: schedule.priority, - call, - maybe_periodic: schedule.maybe_periodic, - origin: schedule.origin, - _phantom: Default::default(), - }) - }) - .or_else(|| { - log::info!("Schedule in agenda for block {:?} is empty - nothing to do here.", &block); - None - }) - }) - .collect::>(), - )) - }); - - #[allow(deprecated)] - frame_support::storage::migration::remove_storage_prefix( - Self::name().as_bytes(), - b"StorageVersion", - &[], - ); + #[cfg(feature = "try-runtime")] + pub fn pre_migrate_to_v3() -> Result<(), &'static str> { + Ok(()) + } - StorageVersion::new(4).put::(); + #[cfg(feature = "try-runtime")] + pub fn post_migrate_to_v3() -> Result<(), &'static str> { + use frame_support::dispatch::GetStorageVersion; - weight + T::DbWeight::get().writes(2) + assert!(Self::current_storage_version() == 3); + for k in Agenda::::iter_keys() { + let _ = Agenda::::try_get(k).map_err(|()| "Invalid item in Agenda")?; + } + Ok(()) } -} -impl Pallet { /// Helper to migrate scheduler when the pallet origin type has changed. 
pub fn migrate_origin + codec::Decode>() { Agenda::::translate::< - Vec< - Option< - Scheduled< - TaskName, - Bounded<::RuntimeCall>, - T::BlockNumber, - OldOrigin, - T::AccountId, - >, - >, - >, + Vec, T::BlockNumber, OldOrigin, T::AccountId>>>, _, >(|_, agenda| { - Some(BoundedVec::truncate_from( + Some( agenda .into_iter() .map(|schedule| { @@ -689,7 +650,7 @@ impl Pallet { }) }) .collect::>(), - )) + ) }); } @@ -710,64 +671,34 @@ impl Pallet { Ok(when) } - fn place_task( - when: T::BlockNumber, - what: ScheduledOf, - ) -> Result, (DispatchError, ScheduledOf)> { - let maybe_name = what.maybe_id; - let index = Self::push_to_agenda(when, what)?; - let address = (when, index); - if let Some(name) = maybe_name { - Lookup::::insert(name, address) - } - Self::deposit_event(Event::Scheduled { when: address.0, index: address.1 }); - Ok(address) - } - - fn push_to_agenda( - when: T::BlockNumber, - what: ScheduledOf, - ) -> Result)> { - let mut agenda = Agenda::::get(when); - let index = if (agenda.len() as u32) < T::MaxScheduledPerBlock::get() { - // will always succeed due to the above check. - let _ = agenda.try_push(Some(what)); - agenda.len() as u32 - 1 - } else { - if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) { - agenda[hole_index] = Some(what); - hole_index as u32 - } else { - return Err((DispatchError::Exhausted, what)) - } - }; - Agenda::::insert(when, agenda); - Ok(index) - } - fn do_schedule( when: DispatchTime, maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, + call: CallOrHashOf, ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; + call.ensure_requested::(); // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. 
.map(|(p, c)| (p, c - 1)); - let task = Scheduled { + let s = Some(Scheduled { maybe_id: None, priority, call, maybe_periodic, origin, - _phantom: PhantomData, - }; - Self::place_task(when, task).map_err(|x| x.0) + _phantom: PhantomData::::default(), + }); + Agenda::::append(when, s); + let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; + Self::deposit_event(Event::Scheduled { when, index }); + + Ok((when, index)) } fn do_cancel( @@ -777,7 +708,7 @@ impl Pallet { let scheduled = Agenda::::try_mutate(when, |agenda| { agenda.get_mut(index as usize).map_or( Ok(None), - |s| -> Result>, DispatchError> { + |s| -> Result>, DispatchError> { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { if matches!( T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), @@ -791,7 +722,7 @@ impl Pallet { ) })?; if let Some(s) = scheduled { - T::Preimages::drop(&s.call); + s.call.ensure_unrequested::(); if let Some(id) = s.maybe_id { Lookup::::remove(id); } @@ -812,23 +743,27 @@ impl Pallet { return Err(Error::::RescheduleNoChange.into()) } - let task = Agenda::::try_mutate(when, |agenda| { + Agenda::::try_mutate(when, |agenda| -> DispatchResult { let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - ensure!(!matches!(task, Some(Scheduled { maybe_id: Some(_), .. 
})), Error::::Named); - task.take().ok_or(Error::::NotFound) + let task = task.take().ok_or(Error::::NotFound)?; + Agenda::::append(new_time, Some(task)); + Ok(()) })?; + + let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; Self::deposit_event(Event::Canceled { when, index }); + Self::deposit_event(Event::Scheduled { when: new_time, index: new_index }); - Self::place_task(new_time, task).map_err(|x| x.0) + Ok((new_time, new_index)) } fn do_schedule_named( - id: TaskName, + id: Vec, when: DispatchTime, maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, + call: CallOrHashOf, ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -837,24 +772,32 @@ impl Pallet { let when = Self::resolve_time(when)?; + call.ensure_requested::(); + // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. 
.map(|(p, c)| (p, c - 1)); - let task = Scheduled { - maybe_id: Some(id), + let s = Scheduled { + maybe_id: Some(id.clone()), priority, call, maybe_periodic, origin, _phantom: Default::default(), }; - Self::place_task(when, task).map_err(|x| x.0) + Agenda::::append(when, Some(s)); + let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; + let address = (when, index); + Lookup::::insert(&id, &address); + Self::deposit_event(Event::Scheduled { when, index }); + + Ok(address) } - fn do_cancel_named(origin: Option, id: TaskName) -> DispatchResult { + fn do_cancel_named(origin: Option, id: Vec) -> DispatchResult { Lookup::::try_mutate_exists(id, |lookup| -> DispatchResult { if let Some((when, index)) = lookup.take() { let i = index as usize; @@ -867,7 +810,7 @@ impl Pallet { ) { return Err(BadOrigin.into()) } - T::Preimages::drop(&s.call); + s.call.ensure_unrequested::(); } *s = None; } @@ -882,245 +825,42 @@ impl Pallet { } fn do_reschedule_named( - id: TaskName, + id: Vec, new_time: DispatchTime, ) -> Result, DispatchError> { let new_time = Self::resolve_time(new_time)?; - let lookup = Lookup::::get(id); - let (when, index) = lookup.ok_or(Error::::NotFound)?; - - if new_time == when { - return Err(Error::::RescheduleNoChange.into()) - } - - let task = Agenda::::try_mutate(when, |agenda| { - let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - task.take().ok_or(Error::::NotFound) - })?; - Self::deposit_event(Event::Canceled { when, index }); - Self::place_task(new_time, task).map_err(|x| x.0) - } -} - -enum ServiceTaskError { - /// Could not be executed due to missing preimage. - Unavailable, - /// Could not be executed due to weight limitations. - Overweight, -} -use ServiceTaskError::*; - -impl Pallet { - /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. 
- fn service_agendas(weight: &mut WeightMeter, now: T::BlockNumber, max: u32) { - if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { - return - } + Lookup::::try_mutate_exists( + id, + |lookup| -> Result, DispatchError> { + let (when, index) = lookup.ok_or(Error::::NotFound)?; - let mut incomplete_since = now + One::one(); - let mut when = IncompleteSince::::take().unwrap_or(now); - let mut executed = 0; + if new_time == when { + return Err(Error::::RescheduleNoChange.into()) + } - let max_items = T::MaxScheduledPerBlock::get(); - let mut count_down = max; - let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items); - while count_down > 0 && when <= now && weight.can_accrue(service_agenda_base_weight) { - if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) { - incomplete_since = incomplete_since.min(when); - } - when.saturating_inc(); - count_down.saturating_dec(); - } - incomplete_since = incomplete_since.min(when); - if incomplete_since <= now { - IncompleteSince::::put(incomplete_since); - } - } + Agenda::::try_mutate(when, |agenda| -> DispatchResult { + let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; + let task = task.take().ok_or(Error::::NotFound)?; + Agenda::::append(new_time, Some(task)); - /// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a - /// later block. 
- fn service_agenda( - weight: &mut WeightMeter, - executed: &mut u32, - now: T::BlockNumber, - when: T::BlockNumber, - max: u32, - ) -> bool { - let mut agenda = Agenda::::get(when); - let mut ordered = agenda - .iter() - .enumerate() - .filter_map(|(index, maybe_item)| { - maybe_item.as_ref().map(|item| (index as u32, item.priority)) - }) - .collect::>(); - ordered.sort_by_key(|k| k.1); - let within_limit = - weight.check_accrue(T::WeightInfo::service_agenda_base(ordered.len() as u32)); - debug_assert!(within_limit, "weight limit should have been checked in advance"); - - // Items which we know can be executed and have postponed for execution in a later block. - let mut postponed = (ordered.len() as u32).saturating_sub(max); - // Items which we don't know can ever be executed. - let mut dropped = 0; - - for (agenda_index, _) in ordered.into_iter().take(max as usize) { - let task = match agenda[agenda_index as usize].take() { - None => continue, - Some(t) => t, - }; - let base_weight = T::WeightInfo::service_task( - task.call.lookup_len().map(|x| x as usize), - task.maybe_id.is_some(), - task.maybe_periodic.is_some(), - ); - if !weight.can_accrue(base_weight) { - postponed += 1; - break - } - let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); - agenda[agenda_index as usize] = match result { - Err((Unavailable, slot)) => { - dropped += 1; - slot - }, - Err((Overweight, slot)) => { - postponed += 1; - slot - }, - Ok(()) => { - *executed += 1; - None - }, - }; - } - if postponed > 0 || dropped > 0 { - Agenda::::insert(when, agenda); - } else { - Agenda::::remove(when); - } - postponed == 0 - } + Ok(()) + })?; - /// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter. - /// - /// This involves: - /// - removing and potentially replacing the `Lookup` entry for the task. - /// - realizing the task's call which can include a preimage lookup. 
- /// - Rescheduling the task for execution in a later agenda if periodic. - fn service_task( - weight: &mut WeightMeter, - now: T::BlockNumber, - when: T::BlockNumber, - agenda_index: u32, - is_first: bool, - mut task: ScheduledOf, - ) -> Result<(), (ServiceTaskError, Option>)> { - if let Some(ref id) = task.maybe_id { - Lookup::::remove(id); - } + let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; + Self::deposit_event(Event::Canceled { when, index }); + Self::deposit_event(Event::Scheduled { when: new_time, index: new_index }); - let (call, lookup_len) = match T::Preimages::peek(&task.call) { - Ok(c) => c, - Err(_) => return Err((Unavailable, Some(task))), - }; + *lookup = Some((new_time, new_index)); - weight.check_accrue(T::WeightInfo::service_task( - lookup_len.map(|x| x as usize), - task.maybe_id.is_some(), - task.maybe_periodic.is_some(), - )); - - match Self::execute_dispatch(weight, task.origin.clone(), call) { - Err(Unavailable) => { - debug_assert!(false, "Checked to exist with `peek`"); - Self::deposit_event(Event::CallUnavailable { - task: (when, agenda_index), - id: task.maybe_id, - }); - Err((Unavailable, Some(task))) + Ok((new_time, new_index)) }, - Err(Overweight) if is_first => { - T::Preimages::drop(&task.call); - Self::deposit_event(Event::PermanentlyOverweight { - task: (when, agenda_index), - id: task.maybe_id, - }); - Err((Unavailable, Some(task))) - }, - Err(Overweight) => Err((Overweight, Some(task))), - Ok(result) => { - Self::deposit_event(Event::Dispatched { - task: (when, agenda_index), - id: task.maybe_id, - result, - }); - if let &Some((period, count)) = &task.maybe_periodic { - if count > 1 { - task.maybe_periodic = Some((period, count - 1)); - } else { - task.maybe_periodic = None; - } - let wake = now.saturating_add(period); - match Self::place_task(wake, task) { - Ok(_) => {}, - Err((_, task)) => { - // TODO: Leave task in storage somewhere for it to be rescheduled - // manually. 
- T::Preimages::drop(&task.call); - Self::deposit_event(Event::PeriodicFailed { - task: (when, agenda_index), - id: task.maybe_id, - }); - }, - } - } else { - T::Preimages::drop(&task.call); - } - Ok(()) - }, - } - } - - /// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight` - /// counter does not exceed its limit and that it is counted accurately (e.g. accounted using - /// post info if available). - /// - /// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the - /// call itself). - fn execute_dispatch( - weight: &mut WeightMeter, - origin: T::PalletsOrigin, - call: ::RuntimeCall, - ) -> Result { - let base_weight = match origin.as_system_ref() { - Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(), - _ => T::WeightInfo::execute_dispatch_unsigned(), - }; - let call_weight = call.get_dispatch_info().weight; - // We only allow a scheduled call if it cannot push the weight past the limit. - let max_weight = base_weight.saturating_add(call_weight); - - if !weight.can_accrue(max_weight) { - return Err(Overweight) - } - - let dispatch_origin = origin.into(); - let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { - Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => - (error_and_info.post_info.actual_weight, Err(error_and_info.error)), - }; - let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); - weight.check_accrue(base_weight); - weight.check_accrue(call_weight); - Ok(result) + ) } } -impl> - schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> for Pallet +impl schedule::v2::Anon::Call, T::PalletsOrigin> + for Pallet { type Address = TaskAddress; type Hash = T::Hash; @@ -1132,8 +872,6 @@ impl> origin: T::PalletsOrigin, call: CallOrHashOf, ) -> Result { - let call = call.as_value().ok_or(DispatchError::CannotLookup)?; - let call = T::Preimages::bound(call)?.transmute(); Self::do_schedule(when, maybe_periodic, 
priority, origin, call) } @@ -1153,8 +891,8 @@ impl> } } -impl> - schedule::v2::Named::RuntimeCall, T::PalletsOrigin> for Pallet +impl schedule::v2::Named::Call, T::PalletsOrigin> + for Pallet { type Address = TaskAddress; type Hash = T::Hash; @@ -1167,108 +905,23 @@ impl> origin: T::PalletsOrigin, call: CallOrHashOf, ) -> Result { - let call = call.as_value().ok_or(())?; - let call = T::Preimages::bound(call).map_err(|_| ())?.transmute(); - let name = blake2_256(&id[..]); - Self::do_schedule_named(name, when, maybe_periodic, priority, origin, call).map_err(|_| ()) + Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } fn cancel_named(id: Vec) -> Result<(), ()> { - let name = blake2_256(&id[..]); - Self::do_cancel_named(None, name).map_err(|_| ()) + Self::do_cancel_named(None, id).map_err(|_| ()) } fn reschedule_named( id: Vec, when: DispatchTime, ) -> Result { - let name = blake2_256(&id[..]); - Self::do_reschedule_named(name, when) + Self::do_reschedule_named(id, when) } fn next_dispatch_time(id: Vec) -> Result { - let name = blake2_256(&id[..]); - Lookup::::get(name) - .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) - .ok_or(()) - } -} - -impl schedule::v3::Anon::RuntimeCall, T::PalletsOrigin> - for Pallet -{ - type Address = TaskAddress; - - fn schedule( - when: DispatchTime, - maybe_periodic: Option>, - priority: schedule::Priority, - origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, - ) -> Result { - Self::do_schedule(when, maybe_periodic, priority, origin, call) - } - - fn cancel((when, index): Self::Address) -> Result<(), DispatchError> { - Self::do_cancel(None, (when, index)).map_err(map_err_to_v3_err::) - } - - fn reschedule( - address: Self::Address, - when: DispatchTime, - ) -> Result { - Self::do_reschedule(address, when).map_err(map_err_to_v3_err::) - } - - fn next_dispatch_time((when, index): Self::Address) -> Result { - Agenda::::get(when) - .get(index as usize) - 
.ok_or(DispatchError::Unavailable) - .map(|_| when) - } -} - -use schedule::v3::TaskName; - -impl schedule::v3::Named::RuntimeCall, T::PalletsOrigin> - for Pallet -{ - type Address = TaskAddress; - - fn schedule_named( - id: TaskName, - when: DispatchTime, - maybe_periodic: Option>, - priority: schedule::Priority, - origin: T::PalletsOrigin, - call: Bounded<::RuntimeCall>, - ) -> Result { - Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call) - } - - fn cancel_named(id: TaskName) -> Result<(), DispatchError> { - Self::do_cancel_named(None, id).map_err(map_err_to_v3_err::) - } - - fn reschedule_named( - id: TaskName, - when: DispatchTime, - ) -> Result { - Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::) - } - - fn next_dispatch_time(id: TaskName) -> Result { Lookup::::get(id) .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) - .ok_or(DispatchError::Unavailable) - } -} - -/// Maps a pallet error to an `schedule::v3` error. -fn map_err_to_v3_err(err: DispatchError) -> DispatchError { - if err == DispatchError::from(Error::::NotFound) { - DispatchError::Unavailable - } else { - err + .ok_or(()) } } diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs deleted file mode 100644 index 6769d20023196..0000000000000 --- a/frame/scheduler/src/migration.rs +++ /dev/null @@ -1,402 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Migrations for the scheduler pallet. - -use super::*; -use frame_support::traits::OnRuntimeUpgrade; - -/// The log target. -const TARGET: &'static str = "runtime::scheduler::migration"; - -pub mod v1 { - use super::*; - use frame_support::pallet_prelude::*; - - #[frame_support::storage_alias] - pub(crate) type Agenda = StorageMap< - Pallet, - Twox64Concat, - ::BlockNumber, - Vec< - Option< - ScheduledV1<::RuntimeCall, ::BlockNumber>, - >, - >, - ValueQuery, - >; - - #[frame_support::storage_alias] - pub(crate) type Lookup = StorageMap< - Pallet, - Twox64Concat, - Vec, - TaskAddress<::BlockNumber>, - >; -} - -pub mod v2 { - use super::*; - use frame_support::pallet_prelude::*; - - #[frame_support::storage_alias] - pub(crate) type Agenda = StorageMap< - Pallet, - Twox64Concat, - ::BlockNumber, - Vec>>, - ValueQuery, - >; - - #[frame_support::storage_alias] - pub(crate) type Lookup = StorageMap< - Pallet, - Twox64Concat, - Vec, - TaskAddress<::BlockNumber>, - >; -} - -pub mod v3 { - use super::*; - use frame_support::pallet_prelude::*; - - #[frame_support::storage_alias] - pub(crate) type Agenda = StorageMap< - Pallet, - Twox64Concat, - ::BlockNumber, - Vec>>, - ValueQuery, - >; - - #[frame_support::storage_alias] - pub(crate) type Lookup = StorageMap< - Pallet, - Twox64Concat, - Vec, - TaskAddress<::BlockNumber>, - >; - - /// Migrate the scheduler pallet from V3 to V4. 
- pub struct MigrateToV4(sp_std::marker::PhantomData); - - impl> OnRuntimeUpgrade for MigrateToV4 { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - assert_eq!(StorageVersion::get::>(), 3, "Can only upgrade from version 3"); - - let agendas = Agenda::::iter_keys().count() as u32; - let decodable_agendas = Agenda::::iter_values().count() as u32; - if agendas != decodable_agendas { - // This is not necessarily an error, but can happen when there are Calls - // in an Agenda that are not valid anymore with the new runtime. - log::error!( - target: TARGET, - "Can only decode {} of {} agendas - others will be dropped", - decodable_agendas, - agendas - ); - } - log::info!(target: TARGET, "Trying to migrate {} agendas...", decodable_agendas); - - // Check that no agenda overflows `MaxScheduledPerBlock`. - let max_scheduled_per_block = T::MaxScheduledPerBlock::get() as usize; - for (block_number, agenda) in Agenda::::iter() { - if agenda.iter().cloned().filter_map(|s| s).count() > max_scheduled_per_block { - log::error!( - target: TARGET, - "Would truncate agenda of block {:?} from {} items to {} items.", - block_number, - agenda.len(), - max_scheduled_per_block, - ); - return Err("Agenda would overflow `MaxScheduledPerBlock`.") - } - } - // Check that bounding the calls will not overflow `MAX_LENGTH`. 
- let max_length = T::Preimages::MAX_LENGTH as usize; - for (block_number, agenda) in Agenda::::iter() { - for schedule in agenda.iter().cloned().filter_map(|s| s) { - match schedule.call { - frame_support::traits::schedule::MaybeHashed::Value(call) => { - let l = call.using_encoded(|c| c.len()); - if l > max_length { - log::error!( - target: TARGET, - "Call in agenda of block {:?} is too large: {} byte", - block_number, - l, - ); - return Err("Call is too large.") - } - }, - _ => (), - } - } - } - - Ok((decodable_agendas as u32).encode()) - } - - fn on_runtime_upgrade() -> Weight { - let version = StorageVersion::get::>(); - if version != 3 { - log::warn!( - target: TARGET, - "skipping v3 to v4 migration: executed on wrong storage version.\ - Expected version 3, found {:?}", - version, - ); - return T::DbWeight::get().reads(1) - } - - crate::Pallet::::migrate_v3_to_v4() - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - assert_eq!(StorageVersion::get::>(), 4, "Must upgrade"); - - // Check that everything decoded fine. - for k in crate::Agenda::::iter_keys() { - assert!(crate::Agenda::::try_get(k).is_ok(), "Cannot decode V4 Agenda"); - } - - let old_agendas: u32 = - Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); - let new_agendas = crate::Agenda::::iter_keys().count() as u32; - if old_agendas != new_agendas { - // This is not necessarily an error, but can happen when there are Calls - // in an Agenda that are not valid anymore in the new runtime. - log::error!( - target: TARGET, - "Did not migrate all Agendas. 
Previous {}, Now {}", - old_agendas, - new_agendas, - ); - } else { - log::info!(target: TARGET, "Migrated {} agendas.", new_agendas); - } - - Ok(()) - } - } -} - -#[cfg(test)] -#[cfg(feature = "try-runtime")] -mod test { - use super::*; - use crate::mock::*; - use frame_support::Hashable; - use sp_std::borrow::Cow; - use substrate_test_utils::assert_eq_uvec; - - #[test] - #[allow(deprecated)] - fn migration_v3_to_v4_works() { - new_test_ext().execute_with(|| { - // Assume that we are at V3. - StorageVersion::new(3).put::(); - - // Call that will be bounded to a `Lookup`. - let large_call = - RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1024] }); - // Call that can be inlined. - let small_call = - RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 10] }); - // Call that is already hashed and can will be converted to `Legacy`. - let hashed_call = - RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 2048] }); - let bound_hashed_call = Preimage::bound(hashed_call.clone()).unwrap(); - assert!(bound_hashed_call.lookup_needed()); - // A Call by hash that will fail to decode becomes `None`. 
- let trash_data = vec![255u8; 1024]; - let undecodable_hash = Preimage::note(Cow::Borrowed(&trash_data)).unwrap(); - - for i in 0..2u64 { - let k = i.twox_64_concat(); - let old = vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: i as u8 + 10, - call: small_call.clone().into(), - maybe_periodic: None, // 1 - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(vec![i as u8; 32]), - priority: 123, - call: large_call.clone().into(), - maybe_periodic: Some((4u64, 20)), - origin: signed(i), - _phantom: PhantomData::::default(), - }), - Some(ScheduledV3Of:: { - maybe_id: Some(vec![255 - i as u8; 320]), - priority: 123, - call: MaybeHashed::Hash(bound_hashed_call.hash()), - maybe_periodic: Some((8u64, 10)), - origin: signed(i), - _phantom: PhantomData::::default(), - }), - Some(ScheduledV3Of:: { - maybe_id: Some(vec![i as u8; 320]), - priority: 123, - call: MaybeHashed::Hash(undecodable_hash.clone()), - maybe_periodic: Some((4u64, 20)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ]; - frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); - } - - let state = v3::MigrateToV4::::pre_upgrade().unwrap(); - let _w = v3::MigrateToV4::::on_runtime_upgrade(); - v3::MigrateToV4::::post_upgrade(state).unwrap(); - - let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); - x.sort_by_key(|x| x.0); - - let bound_large_call = Preimage::bound(large_call).unwrap(); - assert!(bound_large_call.lookup_needed()); - let bound_small_call = Preimage::bound(small_call).unwrap(); - assert!(!bound_small_call.lookup_needed()); - - let expected = vec![ - ( - 0, - vec![ - Some(ScheduledOf:: { - maybe_id: None, - priority: 10, - call: bound_small_call.clone(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&[0u8; 32])), - priority: 123, - call: bound_large_call.clone(), - 
maybe_periodic: Some((4u64, 20)), - origin: signed(0), - _phantom: PhantomData::::default(), - }), - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&[255u8; 320])), - priority: 123, - call: Bounded::from_legacy_hash(bound_hashed_call.hash()), - maybe_periodic: Some((8u64, 10)), - origin: signed(0), - _phantom: PhantomData::::default(), - }), - None, - ], - ), - ( - 1, - vec![ - Some(ScheduledOf:: { - maybe_id: None, - priority: 11, - call: bound_small_call.clone(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&[1u8; 32])), - priority: 123, - call: bound_large_call.clone(), - maybe_periodic: Some((4u64, 20)), - origin: signed(1), - _phantom: PhantomData::::default(), - }), - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&[254u8; 320])), - priority: 123, - call: Bounded::from_legacy_hash(bound_hashed_call.hash()), - maybe_periodic: Some((8u64, 10)), - origin: signed(1), - _phantom: PhantomData::::default(), - }), - None, - ], - ), - ]; - for (outer, (i, j)) in x.iter().zip(expected.iter()).enumerate() { - assert_eq!(i.0, j.0); - for (inner, (x, y)) in i.1.iter().zip(j.1.iter()).enumerate() { - assert_eq!(x, y, "at index: outer {} inner {}", outer, inner); - } - } - assert_eq_uvec!(x, expected); - - assert_eq!(StorageVersion::get::(), 4); - }); - } - - #[test] - #[allow(deprecated)] - fn migration_v3_to_v4_too_large_calls_are_ignored() { - new_test_ext().execute_with(|| { - // Assume that we are at V3. 
- StorageVersion::new(3).put::(); - - let too_large_call = RuntimeCall::System(frame_system::Call::remark { - remark: vec![0; ::Preimages::MAX_LENGTH + 1], - }); - - let i = 0u64; - let k = i.twox_64_concat(); - let old = vec![Some(ScheduledV3Of:: { - maybe_id: None, - priority: 1, - call: too_large_call.clone().into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - })]; - frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); - - // The pre_upgrade hook fails: - let err = v3::MigrateToV4::::pre_upgrade().unwrap_err(); - assert!(err.contains("Call is too large")); - // But the migration itself works: - let _w = v3::MigrateToV4::::on_runtime_upgrade(); - - let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); - x.sort_by_key(|x| x.0); - // The call becomes `None`. - let expected = vec![(0, vec![None])]; - assert_eq_uvec!(x, expected); - - assert_eq!(StorageVersion::get::(), 4); - }); - } - - fn signed(i: u64) -> OriginCaller { - system::RawOrigin::Signed(i).into() - } -} diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 61efdfb67b73e..008105dc737ea 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -39,14 +39,15 @@ use sp_runtime::{ #[frame_support::pallet] pub mod logger { use super::{OriginCaller, OriginTrait}; - use frame_support::{pallet_prelude::*, parameter_types}; + use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use std::cell::RefCell; - parameter_types! { - static Log: Vec<(OriginCaller, u32)> = Vec::new(); + thread_local! 
{ + static LOG: RefCell> = RefCell::new(Vec::new()); } pub fn log() -> Vec<(OriginCaller, u32)> { - Log::get().clone() + LOG.with(|log| log.borrow().clone()) } #[pallet::pallet] @@ -58,7 +59,7 @@ pub mod logger { #[pallet::config] pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::event] @@ -70,13 +71,13 @@ pub mod logger { #[pallet::call] impl Pallet where - ::RuntimeOrigin: OriginTrait, + ::Origin: OriginTrait, { #[pallet::weight(*weight)] pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); - Log::mutate(|log| { - log.push((origin.caller().clone(), i)); + LOG.with(|log| { + log.borrow_mut().push((origin.caller().clone(), i)); }); Ok(()) } @@ -84,8 +85,8 @@ pub mod logger { #[pallet::weight(*weight)] pub fn log_without_filter(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); - Log::mutate(|log| { - log.push((origin.caller().clone(), i)); + LOG.with(|log| { + log.borrow_mut().push((origin.caller().clone(), i)); }); Ok(()) } @@ -110,25 +111,23 @@ frame_support::construct_runtime!( // Scheduler must dispatch with root and no filter, this tests base filter is indeed not used. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &RuntimeCall) -> bool { - !matches!(call, RuntimeCall::Logger(LoggerCall::log { .. })) +impl Contains for BaseFilter { + fn contains(call: &Call) -> bool { + !matches!(call, Call::Logger(LoggerCall::log { .. })) } } parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - Weight::from_ref_time(2_000_000_000_000).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); } impl system::Config for Test { type BaseCallFilter = BaseFilter; - type BlockWeights = BlockWeights; + type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -136,7 +135,7 @@ impl system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -149,76 +148,38 @@ impl system::Config for Test { type MaxConsumers = ConstU32<16>; } impl logger::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; +} +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; + pub const NoPreimagePostponement: Option = Some(2); } ord_parameter_types! 
{ pub const One: u64 = 1; } impl pallet_preimage::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); type Currency = (); type ManagerOrigin = EnsureRoot; + type MaxSize = ConstU32<1024>; type BaseDeposit = (); type ByteDeposit = (); } -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn service_agendas_base() -> Weight { - Weight::from_ref_time(0b0000_0001) - } - fn service_agenda_base(i: u32) -> Weight { - Weight::from_ref_time((i << 8) as u64 + 0b0000_0010) - } - fn service_task_base() -> Weight { - Weight::from_ref_time(0b0000_0100) - } - fn service_task_periodic() -> Weight { - Weight::from_ref_time(0b0000_1100) - } - fn service_task_named() -> Weight { - Weight::from_ref_time(0b0001_0100) - } - fn service_task_fetched(s: u32) -> Weight { - Weight::from_ref_time((s << 8) as u64 + 0b0010_0100) - } - fn execute_dispatch_signed() -> Weight { - Weight::from_ref_time(0b0100_0000) - } - fn execute_dispatch_unsigned() -> Weight { - Weight::from_ref_time(0b1000_0000) - } - fn schedule(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn cancel(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn schedule_named(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn cancel_named(_s: u32) -> Weight { - Weight::from_ref_time(50) - } -} -parameter_types! 
{ - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * - BlockWeights::get().max_block; -} - impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; + type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EitherOfDiverse, EnsureSignedBy>; type MaxScheduledPerBlock = ConstU32<10>; - type WeightInfo = TestWeightInfo; + type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type Preimages = Preimage; + type PreimageProvider = Preimage; + type NoPreimagePostponement = NoPreimagePostponement; } pub type LoggerCall = logger::Call; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index 033d787946709..d2a795cb19fa4 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -18,12 +18,10 @@ //! # Scheduler tests. use super::*; -use crate::mock::{ - logger, new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *, -}; +use crate::mock::{logger, new_test_ext, root, run_to_block, Call, LoggerCall, Scheduler, Test, *}; use frame_support::{ assert_err, assert_noop, assert_ok, - traits::{Contains, GetStorageVersion, OnInitialize, QueryPreimage, StorePreimage}, + traits::{Contains, GetStorageVersion, OnInitialize, PreimageProvider}, Hashable, }; use sp_runtime::traits::Hash; @@ -32,16 +30,9 @@ use substrate_test_utils::assert_eq_uvec; #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); assert!(!::BaseCallFilter::contains(&call)); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap() - )); + assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, 
root(), call.into())); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); @@ -54,20 +45,50 @@ fn basic_scheduling_works() { #[test] fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); let hash = ::Hashing::hash_of(&call); - let len = call.using_encoded(|x| x.len()) as u32; - let hashed = Preimage::pick(hash, len); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); + let hashed = MaybeHashed::Hash(hash.clone()); + assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); - assert!(Preimage::is_requested(&hash)); + assert!(Preimage::preimage_requested(&hash)); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); - assert!(!Preimage::len(&hash).is_some()); - assert!(!Preimage::is_requested(&hash)); + assert!(!Preimage::have_preimage(&hash)); + assert!(!Preimage::preimage_requested(&hash)); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn scheduling_with_preimage_postpones_correctly() { + new_test_ext().execute_with(|| { + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let hash = ::Hashing::hash_of(&call); + let hashed = MaybeHashed::Hash(hash.clone()); + + assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); + assert!(Preimage::preimage_requested(&hash)); + + run_to_block(4); + // #4 empty due to no preimage + assert!(logger::log().is_empty()); + + // Register preimage. + assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); + + run_to_block(5); + // #5 empty since postponement is 2 blocks. 
+ assert!(logger::log().is_empty()); + + run_to_block(6); + // #6 is good. assert_eq!(logger::log(), vec![(root(), 42u32)]); + assert!(!Preimage::have_preimage(&hash)); + assert!(!Preimage::preimage_requested(&hash)); + run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); @@ -77,17 +98,10 @@ fn scheduling_with_preimages_works() { fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 - assert_ok!(Scheduler::do_schedule( - DispatchTime::After(3), - None, - 127, - root(), - Preimage::bound(call).unwrap() - )); + assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call.into())); run_to_block(5); assert!(logger::log().is_empty()); run_to_block(6); @@ -101,16 +115,9 @@ fn schedule_after_works() { fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); assert!(!::BaseCallFilter::contains(&call)); - assert_ok!(Scheduler::do_schedule( - DispatchTime::After(0), - None, - 127, - root(), - Preimage::bound(call).unwrap() - )); + assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call.into())); // Will trigger on the next block. 
run_to_block(3); assert_eq!(logger::log(), vec![(root(), 42u32)]); @@ -128,11 +135,7 @@ fn periodic_scheduling_works() { Some((3, 3)), 127, root(), - Preimage::bound(RuntimeCall::Logger(logger::Call::log { - i: 42, - weight: Weight::from_ref_time(10) - })) - .unwrap() + Call::Logger(logger::Call::log { i: 42, weight: 1000 }).into() )); run_to_block(3); assert!(logger::log().is_empty()); @@ -154,18 +157,10 @@ fn periodic_scheduling_works() { #[test] fn reschedule_works() { new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( - Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap() - ) - .unwrap(), + Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into()).unwrap(), (4, 0) ); @@ -193,17 +188,16 @@ fn reschedule_works() { #[test] fn reschedule_named_works() { new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( - [1u8; 32], + 1u32.encode(), DispatchTime::At(4), None, 127, root(), - Preimage::bound(call).unwrap(), + call.into(), ) .unwrap(), (4, 0) @@ -212,10 +206,13 @@ fn reschedule_named_works() { run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), + (6, 0) + ); assert_noop!( - Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)), + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), Error::::RescheduleNoChange ); @@ -233,17 +230,16 @@ fn 
reschedule_named_works() { #[test] fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( - [1u8; 32], + 1u32.encode(), DispatchTime::At(4), Some((3, 3)), 127, root(), - Preimage::bound(call).unwrap(), + call.into(), ) .unwrap(), (4, 0) @@ -252,8 +248,14 @@ fn reschedule_named_perodic_works() { run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(5)).unwrap(), (5, 0)); - assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), + (5, 0) + ); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), + (6, 0) + ); run_to_block(5); assert!(logger::log().is_empty()); @@ -262,7 +264,7 @@ fn reschedule_named_perodic_works() { assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!( - Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(10)).unwrap(), + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), (10, 0) ); @@ -285,16 +287,12 @@ fn cancel_named_scheduling_works_with_normal_cancel() { new_test_ext().execute_with(|| { // at #4. 
Scheduler::do_schedule_named( - [1u8; 32], + 1u32.encode(), DispatchTime::At(4), None, 127, root(), - Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), ) .unwrap(); let i = Scheduler::do_schedule( @@ -302,16 +300,12 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })) - .unwrap(), + Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into(), ) .unwrap(); run_to_block(3); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); + assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); assert_ok!(Scheduler::do_cancel(None, i)); run_to_block(100); assert!(logger::log().is_empty()); @@ -323,44 +317,32 @@ fn cancel_named_periodic_scheduling_works() { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. Scheduler::do_schedule_named( - [1u8; 32], + 1u32.encode(), DispatchTime::At(4), Some((3, 3)), 127, root(), - Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })) - .unwrap(), + Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into(), ) .unwrap(); // same id results in error. assert!(Scheduler::do_schedule_named( - [1u8; 32], + 1u32.encode(), DispatchTime::At(4), None, 127, root(), - Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10) - })) - .unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), ) .is_err()); // different id is ok. 
Scheduler::do_schedule_named( - [2u8; 32], + 2u32.encode(), DispatchTime::At(8), None, 127, root(), - Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), ) .unwrap(); run_to_block(3); @@ -368,7 +350,7 @@ fn cancel_named_periodic_scheduling_works() { run_to_block(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(6); - assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); + assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); @@ -376,23 +358,22 @@ fn cancel_named_periodic_scheduling_works() { #[test] fn scheduler_respects_weight_limits() { - let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) + .into(), )); - let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 3 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) + .into(), )); // 69 and 42 do not fit together run_to_block(4); @@ -402,128 +383,50 @@ fn scheduler_respects_weight_limits() { }); } -/// Permanently overweight calls are not deleted but also not executed. 
#[test] -fn scheduler_does_not_delete_permanently_overweight_call() { - let max_weight: Weight = ::MaximumWeight::get(); +fn scheduler_respects_hard_deadlines_more() { new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, - 127, - root(), - Preimage::bound(call).unwrap(), - )); - // Never executes. - run_to_block(100); - assert_eq!(logger::log(), vec![]); - - // Assert the `PermanentlyOverweight` event. - assert_eq!( - System::events().last().unwrap().event, - crate::Event::PermanentlyOverweight { task: (4, 0), id: None }.into(), - ); - // The call is still in the agenda. - assert!(Agenda::::get(4)[0].is_some()); - }); -} - -#[test] -fn scheduler_handles_periodic_failure() { - let max_weight: Weight = ::MaximumWeight::get(); - let max_per_block = ::MaxScheduledPerBlock::get(); - - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 }); - let bound = Preimage::bound(call).unwrap(); - - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - Some((4, u32::MAX)), - 127, + 0, root(), - bound.clone(), + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) + .into(), )); - // Executes 5 times till block 20. - run_to_block(20); - assert_eq!(logger::log().len(), 5); - - // Block 28 will already be full. - for _ in 0..max_per_block { - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(28), - None, - 120, - root(), - bound.clone(), - )); - } - - // Going to block 24 will emit a `PeriodicFailed` event. 
- run_to_block(24); - assert_eq!(logger::log().len(), 6); - - assert_eq!( - System::events().last().unwrap().event, - crate::Event::PeriodicFailed { task: (24, 0), id: None }.into(), - ); - }); -} - -#[test] -fn scheduler_handles_periodic_unavailable_preimage() { - let max_weight: Weight = ::MaximumWeight::get(); - - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 }); - let hash = ::Hashing::hash_of(&call); - let len = call.using_encoded(|x| x.len()) as u32; - let bound = Preimage::pick(hash, len); - assert_ok!(Preimage::note(call.encode().into())); - assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), - Some((4, u32::MAX)), - 127, + None, + 0, root(), - bound.clone(), + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) + .into(), )); - // Executes 1 times till block 4. + // With base weights, 69 and 42 should not fit together, but do because of hard + // deadlines run_to_block(4); - assert_eq!(logger::log().len(), 1); - - // Unnote the preimage. - Preimage::unnote(&hash); - - // Does not ever execute again. - run_to_block(100); - assert_eq!(logger::log().len(), 1); - - // The preimage is not requested anymore. 
- assert!(!Preimage::is_requested(&hash)); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); } #[test] fn scheduler_respects_priority_ordering() { - let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 1, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) + .into(), )); - let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 3 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) + .into(), )); run_to_block(4); assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); @@ -533,30 +436,30 @@ fn scheduler_respects_priority_ordering() { #[test] fn scheduler_respects_priority_ordering_with_soft_deadlines() { new_test_ext().execute_with(|| { - let max_weight: Weight = ::MaximumWeight::get(); - let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 5 * 2 }); + let max_weight = MaximumSchedulerWeight::get() - <() as WeightInfo>::on_initialize(0); + let item_weight = + <() as WeightInfo>::on_initialize(1) - <() as WeightInfo>::on_initialize(0); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 255, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 42, weight: max_weight / 2 - item_weight }).into(), )); - let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 5 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: max_weight / 2 - item_weight }).into(), )); - let call = RuntimeCall::Logger(LoggerCall::log { i: 2600, weight: max_weight / 5 * 4 
}); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 126, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 2600, weight: max_weight / 2 - item_weight + 1 }) + .into(), )); // 2600 does not fit with 69 or 42, but has higher priority, so will go through @@ -571,96 +474,70 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let call_weight = Weight::from_ref_time(25); + let base_weight = <() as WeightInfo>::on_initialize(0); + let call_weight = MaximumSchedulerWeight::get() / 4; // Named - let call = RuntimeCall::Logger(LoggerCall::log { - i: 3, - weight: call_weight + Weight::from_ref_time(1), - }); assert_ok!(Scheduler::do_schedule_named( - [1u8; 32], + 1u32.encode(), DispatchTime::At(3), None, 255, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 3, weight: call_weight + 1 }).into(), )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: call_weight + Weight::from_ref_time(2), - }); // Anon Periodic assert_ok!(Scheduler::do_schedule( DispatchTime::At(2), Some((1000, 3)), 128, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 42, weight: call_weight + 2 }).into(), )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: call_weight + Weight::from_ref_time(3), - }); // Anon assert_ok!(Scheduler::do_schedule( DispatchTime::At(2), None, 127, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 69, weight: call_weight + 3 }).into(), )); // Named Periodic - let call = RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: call_weight + Weight::from_ref_time(4), - }); assert_ok!(Scheduler::do_schedule_named( - [2u8; 32], + 2u32.encode(), DispatchTime::At(1), Some((1000, 3)), 126, root(), - Preimage::bound(call).unwrap(), + Call::Logger(LoggerCall::log { i: 2600, weight: call_weight + 4 }).into(), )); // Will include 
the named periodic only + let actual_weight = Scheduler::on_initialize(1); assert_eq!( - Scheduler::on_initialize(1), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(1) + - ::service_task(None, true, true) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(4) + actual_weight, + base_weight + + call_weight + 4 + <() as MarginalWeightInfo>::item(true, true, Some(false)) ); - assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); // Will include anon and anon periodic + let actual_weight = Scheduler::on_initialize(2); assert_eq!( - Scheduler::on_initialize(2), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(2) + - ::service_task(None, false, true) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(3) + - ::service_task(None, false, false) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(2) + actual_weight, + base_weight + + call_weight + 2 + <() as MarginalWeightInfo>::item(false, false, Some(false)) + + call_weight + 3 + <() as MarginalWeightInfo>::item(true, false, Some(false)) ); - assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only + let actual_weight = Scheduler::on_initialize(3); assert_eq!( - Scheduler::on_initialize(3), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(1) + - ::service_task(None, true, false) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_ref_time(1) + actual_weight, + base_weight + + call_weight + 1 + <() as MarginalWeightInfo>::item(false, true, Some(false)) ); - assert_eq!(IncompleteSince::::get(), None); assert_eq!( logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)] @@ -668,34 +545,23 @@ fn 
on_initialize_weight_is_correct() { // Will contain none let actual_weight = Scheduler::on_initialize(4); - assert_eq!( - actual_weight, - TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(0) - ); + assert_eq!(actual_weight, base_weight); }); } #[test] fn root_calls_works() { new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - assert_ok!( - Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 4, None, 127, call,) - ); - assert_ok!(Scheduler::schedule(RuntimeOrigin::root(), 4, None, 127, call2)); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); + assert_ok!(Scheduler::schedule_named(Origin::root(), 1u32.encode(), 4, None, 127, call,)); + assert_ok!(Scheduler::schedule(Origin::root(), 4, None, 127, call2)); run_to_block(3); // Scheduled calls are in the agenda. 
assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), [1u8; 32])); - assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1)); + assert_ok!(Scheduler::cancel_named(Origin::root(), 1u32.encode())); + assert_ok!(Scheduler::cancel(Origin::root(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); assert!(logger::log().is_empty()); @@ -707,50 +573,35 @@ fn fails_to_schedule_task_in_the_past() { new_test_ext().execute_with(|| { run_to_block(3); - let call1 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - let call3 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); + let call1 = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); + let call3 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); - assert_noop!( - Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 2, None, 127, call1), + assert_err!( + Scheduler::schedule_named(Origin::root(), 1u32.encode(), 2, None, 127, call1), Error::::TargetBlockNumberInPast, ); - assert_noop!( - Scheduler::schedule(RuntimeOrigin::root(), 2, None, 127, call2), + assert_err!( + Scheduler::schedule(Origin::root(), 2, None, 127, call2), Error::::TargetBlockNumberInPast, ); - assert_noop!( - Scheduler::schedule(RuntimeOrigin::root(), 3, None, 127, call3), + assert_err!( + Scheduler::schedule(Origin::root(), 3, None, 127, call3), Error::::TargetBlockNumberInPast, ); }); } #[test] -fn should_use_origin() { +fn should_use_orign() { new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - 
let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), - [1u8; 32], + 1u32.encode(), 4, None, 127, @@ -761,7 +612,7 @@ fn should_use_origin() { // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), [1u8; 32])); + assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), 1u32.encode())); assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); @@ -770,20 +621,14 @@ fn should_use_origin() { } #[test] -fn should_check_origin() { +fn should_check_orign() { new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); assert_noop!( Scheduler::schedule_named( system::RawOrigin::Signed(2).into(), - [1u8; 32], + 1u32.encode(), 4, None, 127, @@ -799,19 +644,15 @@ fn should_check_origin() { } #[test] -fn should_check_origin_for_cancel() { +fn should_check_orign_for_cancel() { new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 42, - weight: Weight::from_ref_time(10), - })); + let call = + 
Box::new(Call::Logger(LoggerCall::log_without_filter { i: 69, weight: 1000 }).into()); + let call2 = + Box::new(Call::Logger(LoggerCall::log_without_filter { i: 42, weight: 1000 }).into()); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), - [1u8; 32], + 1u32.encode(), 4, None, 127, @@ -823,11 +664,14 @@ fn should_check_origin_for_cancel() { assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), [1u8; 32]), + Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), BadOrigin ); assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); - assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), [1u8; 32]), BadOrigin); + assert_noop!( + Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), + BadOrigin + ); assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); run_to_block(5); assert_eq!( @@ -841,7 +685,7 @@ fn should_check_origin_for_cancel() { } #[test] -fn migration_to_v4_works() { +fn migration_to_v3_works() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); @@ -849,129 +693,93 @@ fn migration_to_v4_works() { Some(ScheduledV1 { maybe_id: None, priority: i as u8 + 10, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100), - }), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), maybe_periodic: None, }), None, Some(ScheduledV1 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - }), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), maybe_periodic: Some((456u64, 10)), }), ]; frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); } - Scheduler::migrate_v1_to_v4(); - - let mut x = Agenda::::iter().map(|x| (x.0, 
x.1.into_inner())).collect::>(); - x.sort_by_key(|x| x.0); - let expected = vec![ - ( - 0, - vec![ - Some(ScheduledOf:: { - maybe_id: None, - priority: 10, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100), - })) - .unwrap(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&b"test"[..])), - priority: 123, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ], - ), - ( - 1, - vec![ - Some(ScheduledOf:: { - maybe_id: None, - priority: 11, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100), - })) - .unwrap(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&b"test"[..])), - priority: 123, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ], - ), - ( - 2, - vec![ - Some(ScheduledOf:: { - maybe_id: None, - priority: 12, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100), - })) - .unwrap(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledOf:: { - maybe_id: Some(blake2_256(&b"test"[..])), - priority: 123, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ], - ), - ]; - for (i, j) in x.iter().zip(expected.iter()) { - 
assert_eq!(i.0, j.0); - for (x, y) in i.1.iter().zip(j.1.iter()) { - assert_eq!(x, y); - } - } - assert_eq_uvec!(x, expected); + Scheduler::migrate_v1_to_v3(); + + assert_eq_uvec!( + Agenda::::iter().collect::>(), + vec![ + ( + 0, + vec![ + Some(ScheduledV3Of:: { + maybe_id: None, + priority: 10, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV3Of:: { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 1, + vec![ + Some(ScheduledV3Of:: { + maybe_id: None, + priority: 11, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV3Of:: { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 2, + vec![ + Some(ScheduledV3Of:: { + maybe_id: None, + priority: 12, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV3Of:: { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ) + ] + ); assert_eq!(Scheduler::current_storage_version(), 3); }); @@ -982,29 +790,21 @@ fn test_migrate_origin() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); - let old: Vec, u64, u32, u64>>> = vec![ + let old: Vec, u64, u32, u64>>> = vec![ 
Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100), - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), origin: 3u32, maybe_periodic: None, _phantom: Default::default(), }), None, Some(Scheduled { - maybe_id: Some(blake2_256(&b"test"[..])), + maybe_id: Some(b"test".to_vec()), priority: 123, origin: 2u32, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), }), @@ -1025,32 +825,24 @@ fn test_migrate_origin() { Scheduler::migrate_origin::(); assert_eq_uvec!( - Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(), + Agenda::::iter().collect::>(), vec![ ( 0, vec![ - Some(ScheduledOf:: { + Some(ScheduledV2::, u64, OriginCaller, u64> { maybe_id: None, priority: 10, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(Scheduled { - maybe_id: Some(blake2_256(&b"test"[..])), + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), priority: 123, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10) - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1060,27 +852,19 @@ fn test_migrate_origin() { ( 1, vec![ - Some(Scheduled { + Some(ScheduledV2 { maybe_id: None, priority: 11, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: 
Weight::from_ref_time(100) - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(Scheduled { - maybe_id: Some(blake2_256(&b"test"[..])), + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), priority: 123, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10) - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1090,27 +874,19 @@ fn test_migrate_origin() { ( 2, vec![ - Some(Scheduled { + Some(ScheduledV2 { maybe_id: None, priority: 12, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(Scheduled { - maybe_id: Some(blake2_256(&b"test"[..])), + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), priority: 123, - call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10) - })) - .unwrap(), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1121,649 +897,3 @@ fn test_migrate_origin() { ); }); } - -#[test] -fn postponed_named_task_cannot_be_rescheduled() { - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); - let hash = ::Hashing::hash_of(&call); - let len = call.using_encoded(|x| x.len()) as u32; - let hashed = Preimage::pick(hash, len); - let name: [u8; 32] = hash.as_ref().try_into().unwrap(); - - let 
address = Scheduler::do_schedule_named( - name, - DispatchTime::At(4), - None, - 127, - root(), - hashed.clone(), - ) - .unwrap(); - assert!(Preimage::is_requested(&hash)); - assert!(Lookup::::contains_key(name)); - - // Run to a very large block. - run_to_block(10); - // It was not executed. - assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); - // Postponing removes the lookup. - assert!(!Lookup::::contains_key(name)); - - // The agenda still contains the call. - let agenda = Agenda::::iter().collect::>(); - assert_eq!(agenda.len(), 1); - assert_eq!( - agenda[0].1, - vec![Some(Scheduled { - maybe_id: Some(name), - priority: 127, - call: hashed, - maybe_periodic: None, - origin: root().into(), - _phantom: Default::default(), - })] - ); - - // Finally add the preimage. - assert_ok!(Preimage::note(call.encode().into())); - run_to_block(1000); - // It did not execute. - assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); - - // Manually re-schedule the call by name does not work. - assert_err!( - Scheduler::do_reschedule_named(name, DispatchTime::At(1001)), - Error::::NotFound - ); - // Manually re-scheduling the call by address errors. - assert_err!( - Scheduler::do_reschedule(address, DispatchTime::At(1001)), - Error::::Named - ); - }); -} - -/// Using the scheduler as `v3::Anon` works. -#[test] -fn scheduler_v3_anon_basic_works() { - use frame_support::traits::schedule::v3::Anon; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - - // Schedule a call. - let _address = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - // Executes in block 4. - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - // ... but not again. 
- run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -#[test] -fn scheduler_v3_anon_cancel_works() { - use frame_support::traits::schedule::v3::Anon; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - - // Schedule a call. - let address = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - // Cancel the call. - assert_ok!(>::cancel(address)); - // It did not get executed. - run_to_block(100); - assert!(logger::log().is_empty()); - // Cannot cancel again. - assert_err!(>::cancel(address), DispatchError::Unavailable); - }); -} - -#[test] -fn scheduler_v3_anon_reschedule_works() { - use frame_support::traits::schedule::v3::Anon; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - - // Schedule a call. - let address = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - - // Cannot re-schedule into the same block. - assert_noop!( - >::reschedule(address, DispatchTime::At(4)), - Error::::RescheduleNoChange - ); - // Cannot re-schedule into the past. - assert_noop!( - >::reschedule(address, DispatchTime::At(3)), - Error::::TargetBlockNumberInPast - ); - // Re-schedule to block 5. - assert_ok!(>::reschedule(address, DispatchTime::At(5))); - // Scheduled for block 5. - run_to_block(4); - assert!(logger::log().is_empty()); - run_to_block(5); - // Does execute in block 5. - assert_eq!(logger::log(), vec![(root(), 42)]); - // Cannot re-schedule executed task. 
- assert_noop!( - >::reschedule(address, DispatchTime::At(10)), - DispatchError::Unavailable - ); - }); -} - -/// Cancelling a call and then scheduling a second call for the same -/// block results in different addresses. -#[test] -fn scheduler_v3_anon_schedule_does_not_resuse_addr() { - use frame_support::traits::schedule::v3::Anon; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - - // Schedule both calls. - let addr_1 = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call.clone()).unwrap(), - ) - .unwrap(); - // Cancel the call. - assert_ok!(>::cancel(addr_1)); - let addr_2 = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap(), - ) - .unwrap(); - - // Should not re-use the address. - assert!(addr_1 != addr_2); - }); -} - -#[test] -fn scheduler_v3_anon_next_schedule_time_works() { - use frame_support::traits::schedule::v3::Anon; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - - // Schedule a call. - let address = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - - // Scheduled for block 4. - assert_eq!(>::next_dispatch_time(address), Ok(4)); - // Block 4 executes it. - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42)]); - - // It has no dispatch time anymore. - assert_noop!( - >::next_dispatch_time(address), - DispatchError::Unavailable - ); - }); -} - -/// Re-scheduling a task changes its next dispatch time. 
-#[test] -fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() { - use frame_support::traits::schedule::v3::Anon; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - - // Schedule a call. - let old_address = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - - // Scheduled for block 4. - assert_eq!(>::next_dispatch_time(old_address), Ok(4)); - // Re-schedule to block 5. - let address = - >::reschedule(old_address, DispatchTime::At(5)).unwrap(); - assert!(address != old_address); - // Scheduled for block 5. - assert_eq!(>::next_dispatch_time(address), Ok(5)); - - // Block 4 does nothing. - run_to_block(4); - assert!(logger::log().is_empty()); - // Block 5 executes it. - run_to_block(5); - assert_eq!(logger::log(), vec![(root(), 42)]); - }); -} - -#[test] -fn scheduler_v3_anon_schedule_agenda_overflows() { - use frame_support::traits::schedule::v3::Anon; - let max: u32 = ::MaxScheduledPerBlock::get(); - - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - - // Schedule the maximal number allowed per block. - for _ in 0..max { - >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - } - - // One more time and it errors. - assert_noop!( - >::schedule(DispatchTime::At(4), None, 127, root(), bound,), - DispatchError::Exhausted - ); - - run_to_block(4); - // All scheduled calls are executed. - assert_eq!(logger::log().len() as u32, max); - }); -} - -/// Cancelling and scheduling does not overflow the agenda but fills holes. 
-#[test] -fn scheduler_v3_anon_cancel_and_schedule_fills_holes() { - use frame_support::traits::schedule::v3::Anon; - let max: u32 = ::MaxScheduledPerBlock::get(); - assert!(max > 3, "This test only makes sense for MaxScheduledPerBlock > 3"); - - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - let mut addrs = Vec::<_>::default(); - - // Schedule the maximal number allowed per block. - for _ in 0..max { - addrs.push( - >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(), - ); - } - // Cancel three of them. - for addr in addrs.into_iter().take(3) { - >::cancel(addr).unwrap(); - } - // Schedule three new ones. - for i in 0..3 { - let (_block, index) = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - assert_eq!(i, index); - } - - run_to_block(4); - // Maximum number of calls are executed. - assert_eq!(logger::log().len() as u32, max); - }); -} - -/// Re-scheduling does not overflow the agenda but fills holes. -#[test] -fn scheduler_v3_anon_reschedule_fills_holes() { - use frame_support::traits::schedule::v3::Anon; - let max: u32 = ::MaxScheduledPerBlock::get(); - assert!(max > 3, "pre-condition: This test only makes sense for MaxScheduledPerBlock > 3"); - - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - let mut addrs = Vec::<_>::default(); - - // Schedule the maximal number allowed per block. - for _ in 0..max { - addrs.push( - >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(), - ); - } - let mut new_addrs = Vec::<_>::default(); - // Reversed last three elements of block 4. - let last_three = addrs.into_iter().rev().take(3).collect::>(); - // Re-schedule three of them to block 5. 
- for addr in last_three.iter().cloned() { - new_addrs - .push(>::reschedule(addr, DispatchTime::At(5)).unwrap()); - } - // Re-scheduling them back into block 3 should result in the same addrs. - for (old, want) in new_addrs.into_iter().zip(last_three.into_iter().rev()) { - let new = >::reschedule(old, DispatchTime::At(4)).unwrap(); - assert_eq!(new, want); - } - - run_to_block(4); - // Maximum number of calls are executed. - assert_eq!(logger::log().len() as u32, max); - }); -} - -/// Re-scheduling into the same block produces a different address -/// if there is still space in the agenda. -#[test] -fn scheduler_v3_anon_reschedule_does_not_resuse_addr_if_agenda_not_full() { - use frame_support::traits::schedule::v3::Anon; - let max: u32 = ::MaxScheduledPerBlock::get(); - assert!(max > 1, "This test only makes sense for MaxScheduledPerBlock > 1"); - - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - - // Schedule both calls. - let addr_1 = >::schedule( - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call.clone()).unwrap(), - ) - .unwrap(); - // Cancel the call. - assert_ok!(>::cancel(addr_1)); - let addr_2 = >::schedule( - DispatchTime::At(5), - None, - 127, - root(), - Preimage::bound(call).unwrap(), - ) - .unwrap(); - // Re-schedule `call` to block 4. - let addr_3 = >::reschedule(addr_2, DispatchTime::At(4)).unwrap(); - - // Should not re-use the address. - assert!(addr_1 != addr_3); - }); -} - -/// The scheduler can be used as `v3::Named` trait. -#[test] -fn scheduler_v3_named_basic_works() { - use frame_support::traits::schedule::v3::Named; - - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let name = [1u8; 32]; - - // Schedule a call. 
- let _address = >::schedule_named( - name, - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - // Executes in block 4. - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - // ... but not again. - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -/// A named task can be cancelled by its name. -#[test] -fn scheduler_v3_named_cancel_named_works() { - use frame_support::traits::schedule::v3::Named; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - let name = [1u8; 32]; - - // Schedule a call. - >::schedule_named( - name, - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - // Cancel the call by name. - assert_ok!(>::cancel_named(name)); - // It did not get executed. - run_to_block(100); - assert!(logger::log().is_empty()); - // Cannot cancel again. - assert_noop!(>::cancel_named(name), DispatchError::Unavailable); - }); -} - -/// A named task can also be cancelled by its address. -#[test] -fn scheduler_v3_named_cancel_without_name_works() { - use frame_support::traits::schedule::v3::{Anon, Named}; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - let name = [1u8; 32]; - - // Schedule a call. - let address = >::schedule_named( - name, - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - // Cancel the call by address. - assert_ok!(>::cancel(address)); - // It did not get executed. - run_to_block(100); - assert!(logger::log().is_empty()); - // Cannot cancel again. 
- assert_err!(>::cancel(address), DispatchError::Unavailable); - }); -} - -/// A named task can be re-scheduled by its name but not by its address. -#[test] -fn scheduler_v3_named_reschedule_named_works() { - use frame_support::traits::schedule::v3::{Anon, Named}; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let name = [1u8; 32]; - - // Schedule a call. - let address = >::schedule_named( - name, - DispatchTime::At(4), - None, - 127, - root(), - Preimage::bound(call).unwrap(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - - // Cannot re-schedule by address. - assert_noop!( - >::reschedule(address, DispatchTime::At(10)), - Error::::Named, - ); - // Cannot re-schedule into the same block. - assert_noop!( - >::reschedule_named(name, DispatchTime::At(4)), - Error::::RescheduleNoChange - ); - // Cannot re-schedule into the past. - assert_noop!( - >::reschedule_named(name, DispatchTime::At(3)), - Error::::TargetBlockNumberInPast - ); - // Re-schedule to block 5. - assert_ok!(>::reschedule_named(name, DispatchTime::At(5))); - // Scheduled for block 5. - run_to_block(4); - assert!(logger::log().is_empty()); - run_to_block(5); - // Does execute in block 5. - assert_eq!(logger::log(), vec![(root(), 42)]); - // Cannot re-schedule executed task. - assert_noop!( - >::reschedule_named(name, DispatchTime::At(10)), - DispatchError::Unavailable - ); - // Also not by address. - assert_noop!( - >::reschedule(address, DispatchTime::At(10)), - DispatchError::Unavailable - ); - }); -} - -#[test] -fn scheduler_v3_named_next_schedule_time_works() { - use frame_support::traits::schedule::v3::{Anon, Named}; - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); - let bound = Preimage::bound(call).unwrap(); - let name = [1u8; 32]; - - // Schedule a call. 
- let address = >::schedule_named( - name, - DispatchTime::At(4), - None, - 127, - root(), - bound.clone(), - ) - .unwrap(); - - run_to_block(3); - // Did not execute till block 3. - assert!(logger::log().is_empty()); - - // Scheduled for block 4. - assert_eq!(>::next_dispatch_time(name), Ok(4)); - // Also works by address. - assert_eq!(>::next_dispatch_time(address), Ok(4)); - // Block 4 executes it. - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42)]); - - // It has no dispatch time anymore. - assert_noop!( - >::next_dispatch_time(name), - DispatchError::Unavailable - ); - // Also not by address. - assert_noop!( - >::next_dispatch_time(address), - DispatchError::Unavailable - ); - }); -} diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 5b86e7a143e7a..dd7ed8104420d 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/scheduler/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/scheduler/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,14 +44,16 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_scheduler. pub trait WeightInfo { - fn service_agendas_base() -> Weight; - fn service_agenda_base(s: u32, ) -> Weight; - fn service_task_base() -> Weight; - fn service_task_fetched(s: u32, ) -> Weight; - fn service_task_named() -> Weight; - fn service_task_periodic() -> Weight; - fn execute_dispatch_signed() -> Weight; - fn execute_dispatch_unsigned() -> Weight; + fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight; + fn on_initialize_named_resolved(s: u32, ) -> Weight; + fn on_initialize_periodic_resolved(s: u32, ) -> Weight; + fn on_initialize_resolved(s: u32, ) -> Weight; + fn on_initialize_named_aborted(s: u32, ) -> Weight; + fn on_initialize_aborted(s: u32, ) -> Weight; + fn on_initialize_periodic_named(s: u32, ) -> Weight; + fn on_initialize_periodic(s: u32, ) -> Weight; + fn on_initialize_named(s: u32, ) -> Weight; + fn on_initialize(s: u32, ) -> Weight; fn schedule(s: u32, ) -> Weight; fn cancel(s: u32, ) -> Weight; fn schedule_named(s: u32, ) -> Weight; @@ -64,194 +63,300 @@ pub trait WeightInfo { /// Weights for pallet_scheduler using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Scheduler IncompleteSince (r:1 w:1) - fn service_agendas_base() -> Weight { - // Minimum execution time: 5_131 nanoseconds. 
- Weight::from_ref_time(5_286_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { + (9_994_000 as Weight) + // Standard Error: 20_000 + .saturating_add((19_843_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(s as Weight))) } // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[0, 512]`. - fn service_agenda_base(s: u32, ) -> Weight { - // Minimum execution time: 4_111 nanoseconds. - Weight::from_ref_time(8_763_440 as u64) - // Standard Error: 783 - .saturating_add(Weight::from_ref_time(372_339 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_resolved(s: u32, ) -> Weight { + (10_318_000 as Weight) + // Standard Error: 17_000 + .saturating_add((15_451_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) } - fn service_task_base() -> Weight { - // Minimum execution time: 10_880 nanoseconds. 
- Weight::from_ref_time(11_194_000 as u64) + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + fn on_initialize_periodic_resolved(s: u32, ) -> Weight { + (11_675_000 as Weight) + // Standard Error: 17_000 + .saturating_add((17_019_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) } + // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - /// The range of component `s` is `[128, 4194304]`. - fn service_task_fetched(s: u32, ) -> Weight { - // Minimum execution time: 25_347 nanoseconds. - Weight::from_ref_time(25_717_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_128 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + fn on_initialize_resolved(s: u32, ) -> Weight { + (11_934_000 as Weight) + // Standard Error: 11_000 + .saturating_add((14_134_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_aborted(s: u32, ) -> Weight { + (7_279_000 as Weight) + // Standard Error: 5_000 + .saturating_add((5_388_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as 
Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + fn on_initialize_aborted(s: u32, ) -> Weight { + (8_619_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_969_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } + // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) - fn service_task_named() -> Weight { - // Minimum execution time: 12_894 nanoseconds. - Weight::from_ref_time(13_108_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn on_initialize_periodic_named(s: u32, ) -> Weight { + (16_129_000 as Weight) + // Standard Error: 7_000 + .saturating_add((9_772_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) } - fn service_task_periodic() -> Weight { - // Minimum execution time: 10_667 nanoseconds. 
- Weight::from_ref_time(10_908_000 as u64) + // Storage: Scheduler Agenda (r:2 w:2) + fn on_initialize_periodic(s: u32, ) -> Weight { + (15_785_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_208_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } - fn execute_dispatch_signed() -> Weight { - // Minimum execution time: 4_124 nanoseconds. - Weight::from_ref_time(4_680_000 as u64) + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named(s: u32, ) -> Weight { + (15_778_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_597_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } - fn execute_dispatch_unsigned() -> Weight { - // Minimum execution time: 4_156 nanoseconds. - Weight::from_ref_time(4_361_000 as u64) + // Storage: Scheduler Agenda (r:1 w:1) + fn on_initialize(s: u32, ) -> Weight { + (15_912_000 as Weight) + // Standard Error: 5_000 + .saturating_add((4_530_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - // Minimum execution time: 20_504 nanoseconds. 
- Weight::from_ref_time(27_066_818 as u64) - // Standard Error: 1_114 - .saturating_add(Weight::from_ref_time(372_897 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_013_000 as Weight) + // Standard Error: 1_000 + .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) - /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { - // Minimum execution time: 21_686 nanoseconds. - Weight::from_ref_time(25_696_496 as u64) - // Standard Error: 1_261 - .saturating_add(Weight::from_ref_time(362_498 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (18_131_000 as Weight) + // Standard Error: 1_000 + .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - // Minimum execution time: 23_084 nanoseconds. 
- Weight::from_ref_time(31_255_518 as u64) - // Standard Error: 1_258 - .saturating_add(Weight::from_ref_time(382_534 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (21_230_000 as Weight) + // Standard Error: 1_000 + .saturating_add((98_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - // Minimum execution time: 23_862 nanoseconds. - Weight::from_ref_time(28_591_336 as u64) - // Standard Error: 742 - .saturating_add(Weight::from_ref_time(369_305 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (20_139_000 as Weight) + // Standard Error: 1_000 + .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Scheduler IncompleteSince (r:1 w:1) - fn service_agendas_base() -> Weight { - // Minimum execution time: 5_131 nanoseconds. 
- Weight::from_ref_time(5_286_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { + (9_994_000 as Weight) + // Standard Error: 20_000 + .saturating_add((19_843_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((4 as Weight).saturating_mul(s as Weight))) } // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[0, 512]`. - fn service_agenda_base(s: u32, ) -> Weight { - // Minimum execution time: 4_111 nanoseconds. - Weight::from_ref_time(8_763_440 as u64) - // Standard Error: 783 - .saturating_add(Weight::from_ref_time(372_339 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_resolved(s: u32, ) -> Weight { + (10_318_000 as Weight) + // Standard Error: 17_000 + .saturating_add((15_451_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) } - fn service_task_base() -> Weight { - // Minimum execution time: 10_880 nanoseconds. 
- Weight::from_ref_time(11_194_000 as u64) + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) + fn on_initialize_periodic_resolved(s: u32, ) -> Weight { + (11_675_000 as Weight) + // Standard Error: 17_000 + .saturating_add((17_019_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) } + // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - /// The range of component `s` is `[128, 4194304]`. - fn service_task_fetched(s: u32, ) -> Weight { - // Minimum execution time: 25_347 nanoseconds. - Weight::from_ref_time(25_717_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_128 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + fn on_initialize_resolved(s: u32, ) -> Weight { + (11_934_000 as Weight) + // Standard Error: 11_000 + .saturating_add((14_134_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named_aborted(s: u32, ) -> Weight { + (7_279_000 as Weight) + // Standard Error: 5_000 + .saturating_add((5_388_000 as Weight).saturating_mul(s as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Scheduler Agenda (r:2 w:2) + // Storage: Preimage PreimageFor (r:1 w:0) + fn on_initialize_aborted(s: u32, ) -> Weight { + (8_619_000 as Weight) + // Standard Error: 4_000 + .saturating_add((2_969_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) - fn service_task_named() -> Weight { - // Minimum execution time: 12_894 nanoseconds. - Weight::from_ref_time(13_108_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn on_initialize_periodic_named(s: u32, ) -> Weight { + (16_129_000 as Weight) + // Standard Error: 7_000 + .saturating_add((9_772_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) } - fn service_task_periodic() -> Weight { - // Minimum execution time: 10_667 nanoseconds. 
- Weight::from_ref_time(10_908_000 as u64) + // Storage: Scheduler Agenda (r:2 w:2) + fn on_initialize_periodic(s: u32, ) -> Weight { + (15_785_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_208_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } - fn execute_dispatch_signed() -> Weight { - // Minimum execution time: 4_124 nanoseconds. - Weight::from_ref_time(4_680_000 as u64) + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn on_initialize_named(s: u32, ) -> Weight { + (15_778_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_597_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } - fn execute_dispatch_unsigned() -> Weight { - // Minimum execution time: 4_156 nanoseconds. - Weight::from_ref_time(4_361_000 as u64) + // Storage: Scheduler Agenda (r:1 w:1) + fn on_initialize(s: u32, ) -> Weight { + (15_912_000 as Weight) + // Standard Error: 5_000 + .saturating_add((4_530_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - // Minimum execution time: 20_504 nanoseconds. 
- Weight::from_ref_time(27_066_818 as u64) - // Standard Error: 1_114 - .saturating_add(Weight::from_ref_time(372_897 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_013_000 as Weight) + // Standard Error: 1_000 + .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) - /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { - // Minimum execution time: 21_686 nanoseconds. - Weight::from_ref_time(25_696_496 as u64) - // Standard Error: 1_261 - .saturating_add(Weight::from_ref_time(362_498 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (18_131_000 as Weight) + // Standard Error: 1_000 + .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - // Minimum execution time: 23_084 nanoseconds. 
- Weight::from_ref_time(31_255_518 as u64) - // Standard Error: 1_258 - .saturating_add(Weight::from_ref_time(382_534 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (21_230_000 as Weight) + // Standard Error: 1_000 + .saturating_add((98_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) - /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - // Minimum execution time: 23_862 nanoseconds. - Weight::from_ref_time(28_591_336 as u64) - // Standard Error: 742 - .saturating_add(Weight::from_ref_time(369_305 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (20_139_000 as Weight) + // Standard Error: 1_000 + .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md index 455bae24e7951..56c6af916ecd0 100644 --- a/frame/scored-pool/README.md +++ b/frame/scored-pool/README.md @@ -58,7 +58,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let _ = >::submit_candidacy( - T::RuntimeOrigin::from(Some(who.clone()).into()) + T::Origin::from(Some(who.clone()).into()) ); Ok(()) } diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index a015c1c568153..abdb9b2acc9b5 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -75,7 +75,7 @@ //! let who = ensure_signed(origin)?; //! //! let _ = >::submit_candidacy( -//! T::RuntimeOrigin::from(Some(who.clone()).into()) +//! T::Origin::from(Some(who.clone()).into()) //! ); //! 
Ok(()) //! } @@ -98,11 +98,10 @@ mod mock; #[cfg(test)] mod tests; -use codec::{FullCodec, MaxEncodedLen}; +use codec::FullCodec; use frame_support::{ ensure, traits::{ChangeMembers, Currency, Get, InitializeMembers, ReservableCurrency}, - BoundedVec, }; pub use pallet::*; use sp_runtime::traits::{AtLeast32Bit, StaticLookup, Zero}; @@ -110,13 +109,7 @@ use sp_std::{fmt::Debug, prelude::*}; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type PoolT = BoundedVec< - (::AccountId, Option<>::Score>), - >::MaximumMembers, ->; -type MembersT = - BoundedVec<::AccountId, >::MaximumMembers>; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type PoolT = Vec<(::AccountId, Option<>::Score>)>; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated @@ -136,6 +129,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] @@ -143,10 +137,6 @@ pub mod pallet { /// The currency used for deposits. type Currency: Currency + ReservableCurrency; - /// Maximum members length allowed. - #[pallet::constant] - type MaximumMembers: Get; - /// The score attributed to a member or candidate. type Score: AtLeast32Bit + Clone @@ -155,12 +145,10 @@ pub mod pallet { + FullCodec + MaybeSerializeDeserialize + Debug - + scale_info::TypeInfo - + MaxEncodedLen; + + scale_info::TypeInfo; /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; // The deposit which is reserved from candidates if they want to // start a candidacy. The deposit gets returned when the candidacy is @@ -183,13 +171,13 @@ pub mod pallet { type MembershipChanged: ChangeMembers; /// Allows a configurable origin type to set a score to a candidate in the pool. 
- type ScoreOrigin: EnsureOrigin; + type ScoreOrigin: EnsureOrigin; /// Required origin for removing a member (though can always be Root). /// Configurable origin which enables removing an entity. If the entity /// is part of the `Members` it is immediately replaced by the next /// highest scoring candidate, if available. - type KickOrigin: EnsureOrigin; + type KickOrigin: EnsureOrigin; } #[pallet::event] @@ -218,11 +206,9 @@ pub mod pallet { InvalidIndex, /// Index does not match requested account. WrongAccountIndex, - /// Number of members exceeds `MaximumMembers`. - TooManyMembers, } - /// The current pool of candidates, stored as an ordered Bounded Vec + /// The current pool of candidates, stored as an ordered Vec /// (ordered descending by score, `None` last, highest first). #[pallet::storage] #[pallet::getter(fn pool)] @@ -242,7 +228,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn members)] pub(crate) type Members, I: 'static = ()> = - StorageValue<_, MembersT, ValueQuery>; + StorageValue<_, Vec, ValueQuery>; /// Size of the `Members` set. #[pallet::storage] @@ -276,10 +262,10 @@ pub mod pallet { }); // Sorts the `Pool` by score in a descending order. Entities which - // have a score of `None` are sorted to the end of the bounded vec. + // have a score of `None` are sorted to the beginning of the vec. pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default())); - >::update_member_count(self.member_count) - .expect("Number of allowed members exceeded"); + + >::put(self.member_count); >::put(&pool); >::refresh_members(pool, ChangeReceiver::MembershipInitialized); } @@ -294,7 +280,7 @@ pub mod pallet { let pool = >::get(); >::refresh_members(pool, ChangeReceiver::MembershipChanged); } - Weight::zero() + 0 } } @@ -321,8 +307,7 @@ pub mod pallet { // can be inserted as last element in pool, since entities with // `None` are always sorted to the end. 
- >::try_append((who.clone(), Option::<>::Score>::None)) - .map_err(|_| Error::::TooManyMembers)?; + >::append((who.clone(), Option::<>::Score>::None)); >::insert(&who, true); @@ -361,7 +346,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn kick( origin: OriginFor, - dest: AccountIdLookupOf, + dest: ::Source, index: u32, ) -> DispatchResult { T::KickOrigin::ensure_origin(origin)?; @@ -385,7 +370,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn score( origin: OriginFor, - dest: AccountIdLookupOf, + dest: ::Source, index: u32, score: T::Score, ) -> DispatchResult { @@ -408,7 +393,7 @@ pub mod pallet { Reverse(maybe_score.unwrap_or_default()) }) .unwrap_or_else(|l| l); - pool.try_insert(location, item).map_err(|_| Error::::TooManyMembers)?; + pool.insert(location, item); >::put(&pool); Self::deposit_event(Event::::CandidateScored); @@ -424,7 +409,8 @@ pub mod pallet { #[pallet::weight(0)] pub fn change_member_count(origin: OriginFor, count: u32) -> DispatchResult { ensure_root(origin)?; - Self::update_member_count(count).map_err(Into::into) + MemberCount::::put(&count); + Ok(()) } } } @@ -437,28 +423,23 @@ impl, I: 'static> Pallet { /// type function to invoke at the end of the method. 
fn refresh_members(pool: PoolT, notify: ChangeReceiver) { let count = MemberCount::::get(); - let old_members = >::get(); - let new_members: Vec = pool + let mut new_members: Vec = pool .into_iter() .filter(|(_, score)| score.is_some()) .take(count as usize) .map(|(account_id, _)| account_id) .collect(); + new_members.sort(); - // It's safe to truncate_from at this point since MemberCount - // is verified that it does not exceed the MaximumMembers value - let mut new_members_bounded: MembersT = BoundedVec::truncate_from(new_members); - - new_members_bounded.sort(); - - >::put(&new_members_bounded); + let old_members = >::get(); + >::put(&new_members); match notify { ChangeReceiver::MembershipInitialized => - T::MembershipInitialized::initialize_members(&new_members_bounded), + T::MembershipInitialized::initialize_members(&new_members), ChangeReceiver::MembershipChanged => - T::MembershipChanged::set_members_sorted(&new_members_bounded[..], &old_members[..]), + T::MembershipChanged::set_members_sorted(&new_members[..], &old_members[..]), } } @@ -504,11 +485,4 @@ impl, I: 'static> Pallet { Ok(()) } - - /// Make sure the new member count value does not exceed the MaximumMembers - fn update_member_count(new_member_count: u32) -> Result<(), Error> { - ensure!(new_member_count <= T::MaximumMembers::get(), Error::::TooManyMembers); - >::put(new_member_count); - Ok(()) - } } diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index d6f653b32ad2d..4fef5385eb2c5 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_scored_pool; use frame_support::{ - bounded_vec, construct_runtime, ord_parameter_types, parameter_types, + ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, GenesisBuild}, }; use frame_system::EnsureSignedBy; @@ -30,11 +30,12 @@ use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; +use std::cell::RefCell; type 
UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; -construct_runtime!( +frame_support::construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, @@ -49,7 +50,7 @@ construct_runtime!( parameter_types! { pub const CandidateDeposit: u64 = 25; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { pub const KickOrigin: u64 = 2; @@ -61,16 +62,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -88,21 +89,21 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } -parameter_types! { - pub static MembersTestValue: BoundedVec> = bounded_vec![0,10]; +thread_local! 
{ + pub static MEMBERS: RefCell> = RefCell::new(vec![]); } pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MembersTestValue::get().into_inner(); + let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); old_plus_incoming.extend_from_slice(incoming); old_plus_incoming.sort(); @@ -112,20 +113,18 @@ impl ChangeMembers for TestChangeMembers { assert_eq!(old_plus_incoming, new_plus_outgoing); - MembersTestValue::set(>>::truncate_from(new.to_vec())); + MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); } } impl InitializeMembers for TestChangeMembers { fn initialize_members(new_members: &[u64]) { - MembersTestValue::set(>>::truncate_from( - new_members.to_vec(), - )); + MEMBERS.with(|m| *m.borrow_mut() = new_members.to_vec()); } } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type KickOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; type MembershipChanged = TestChangeMembers; @@ -134,24 +133,25 @@ impl Config for Test { type Period = ConstU64<4>; type Score = u64; type ScoreOrigin = EnsureSignedBy; - type MaximumMembers = ConstU32<10>; } pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let mut balances = vec![]; - for i in 1..31 { - balances.push((i, 500_000)); + pallet_balances::GenesisConfig:: { + balances: vec![ + (5, 500_000), + (10, 500_000), + (15, 500_000), + (20, 500_000), + (31, 500_000), + (40, 500_000), + (99, 1), + ], } - balances.push((31, 500_000)); - balances.push((40, 500_000)); - balances.push((99, 1)); - - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); + .assimilate_storage(&mut t) + .unwrap(); pallet_scored_pool::GenesisConfig:: { - pool: bounded_vec![(10, Some(1)), (20, Some(2)), (31, Some(2)), (40, Some(3)), (5, None)], + pool: 
vec![(5, None), (10, Some(1)), (20, Some(2)), (31, Some(2)), (40, Some(3))], member_count: 2, } .assimilate_storage(&mut t) diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 8f4daff47cc44..7b431160ddfe5 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -33,7 +33,7 @@ fn query_membership_works() { assert_eq!(ScoredPool::members(), vec![20, 40]); assert_eq!(Balances::reserved_balance(31), CandidateDeposit::get()); assert_eq!(Balances::reserved_balance(40), CandidateDeposit::get()); - assert_eq!(MembersTestValue::get().clone(), vec![20, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![20, 40]); }); } @@ -41,11 +41,11 @@ fn query_membership_works() { fn submit_candidacy_must_not_work() { new_test_ext().execute_with(|| { assert_noop!( - ScoredPool::submit_candidacy(RuntimeOrigin::signed(99)), + ScoredPool::submit_candidacy(Origin::signed(99)), pallet_balances::Error::::InsufficientBalance, ); assert_noop!( - ScoredPool::submit_candidacy(RuntimeOrigin::signed(40)), + ScoredPool::submit_candidacy(Origin::signed(40)), Error::::AlreadyInPool ); }); @@ -58,7 +58,7 @@ fn submit_candidacy_works() { let who = 15; // when - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); assert_eq!(fetch_from_pool(15), Some((who, None))); // then @@ -72,11 +72,11 @@ fn scoring_works() { // given let who = 15; let score = 99; - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, score)); + assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, score)); // then assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); @@ -93,7 +93,7 @@ fn 
scoring_same_element_with_same_score_works() { let score = 2; // when - assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, score)); + assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, score)); // then assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); @@ -109,7 +109,7 @@ fn kicking_works_only_for_authorized() { new_test_ext().execute_with(|| { let who = 40; let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_noop!(ScoredPool::kick(RuntimeOrigin::signed(99), who, index), BadOrigin); + assert_noop!(ScoredPool::kick(Origin::signed(99), who, index), BadOrigin); }); } @@ -123,12 +123,12 @@ fn kicking_works() { // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, index)); + assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); // then assert_eq!(find_in_pool(who), None); assert_eq!(ScoredPool::members(), vec![20, 31]); - assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); assert_eq!(Balances::reserved_balance(who), 0); // deposit must have been returned }); } @@ -138,21 +138,21 @@ fn unscored_entities_must_not_be_used_for_filling_members() { new_test_ext().execute_with(|| { // given // we submit a candidacy, score will be `None` - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(15))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); // when // we remove every scored member ScoredPool::pool().into_iter().for_each(|(who, score)| { if let Some(_) = score { let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, index)); + assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); } }); // then // the `None` candidates should not 
have been filled in assert!(ScoredPool::members().is_empty()); - assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); }); } @@ -161,16 +161,16 @@ fn refreshing_works() { new_test_ext().execute_with(|| { // given let who = 15; - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, 99)); + assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99)); // when ScoredPool::refresh_members(ScoredPool::pool(), ChangeReceiver::MembershipChanged); // then assert_eq!(ScoredPool::members(), vec![15, 40]); - assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); }); } @@ -179,9 +179,9 @@ fn refreshing_happens_every_period() { new_test_ext().execute_with(|| { // given System::set_block_number(1); - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(15))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); let index = find_in_pool(15).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), 15, index, 99)); + assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), 15, index, 99)); assert_eq!(ScoredPool::members(), vec![20, 40]); // when @@ -190,7 +190,7 @@ fn refreshing_happens_every_period() { // then assert_eq!(ScoredPool::members(), vec![15, 40]); - assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); }); } @@ -200,7 +200,7 @@ fn withdraw_candidacy_must_only_work_for_members() { let who = 77; let index = 0; assert_noop!( - 
ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index), + ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::::WrongAccountIndex ); }); @@ -212,15 +212,15 @@ fn oob_index_should_abort() { let who = 40; let oob_index = ScoredPool::pool().len() as u32; assert_noop!( - ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), oob_index), + ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), Error::::InvalidIndex ); assert_noop!( - ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, oob_index, 99), + ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), Error::::InvalidIndex ); assert_noop!( - ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, oob_index), + ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), Error::::InvalidIndex ); }); @@ -232,15 +232,15 @@ fn index_mismatches_should_abort() { let who = 40; let index = 3; assert_noop!( - ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index), + ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::::WrongAccountIndex ); assert_noop!( - ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, 99), + ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), Error::::WrongAccountIndex ); assert_noop!( - ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, index), + ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), Error::::WrongAccountIndex ); }); @@ -254,7 +254,7 @@ fn withdraw_unscored_candidacy_must_work() { // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index)); + assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); // then assert_eq!(fetch_from_pool(5), None); @@ -270,7 +270,7 @@ fn withdraw_scored_candidacy_must_work() { // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - 
assert_ok!(ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index)); + assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); // then assert_eq!(fetch_from_pool(who), None); @@ -286,36 +286,14 @@ fn candidacy_resubmitting_works() { let who = 15; // when - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); assert_eq!(ScoredPool::candidate_exists(who), true); let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index)); + assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); assert_eq!(ScoredPool::candidate_exists(who), false); - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); // then assert_eq!(ScoredPool::candidate_exists(who), true); }); } - -#[test] -fn pool_candidates_exceeded() { - new_test_ext().execute_with(|| { - for i in [1, 2, 3, 4, 6] { - let who = i as u64; - assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score( - RuntimeOrigin::signed(ScoreOrigin::get()), - who, - index, - 99 - )); - } - - assert_noop!( - ScoredPool::submit_candidacy(RuntimeOrigin::signed(8)), - Error::::TooManyMembers - ); - }); -} diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 5b2fc0c9e1ebf..930ddb0ce7057 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -18,7 +18,7 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-session = { version = "4.0.0-dev", 
default-features = false, path = "../../session" } -pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } +pallet-staking = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } @@ -46,7 +46,3 @@ std = [ "sp-session/std", "sp-std/std", ] - -runtime-benchmarks = [ - "pallet-staking/runtime-benchmarks", -] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 9e478ada53cf2..265c35cbe4908 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -18,7 +18,6 @@ //! Benchmarks for the Session Pallet. // This is separated into its own crate due to cyclic dependency issues. 
-#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] mod mock; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 2db7eb385111c..2181493f72947 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -52,16 +52,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -79,7 +79,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; @@ -125,7 +125,7 @@ impl pallet_session::Config for Test { type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; type SessionHandler = TestSessionHandler; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type WeightInfo = (); @@ -150,9 +150,6 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { @@ -162,7 +159,7 @@ impl pallet_staking::Config for Test { type UnixTime 
= pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -174,12 +171,10 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); - type ElectionProvider = onchain::OnChainExecution; + type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type TargetList = pallet_staking::UseValidatorsMap; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 45b4ba3c0a799..2a749f2aae9dd 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -39,8 +39,8 @@ use sp_session::{MembershipProof, ValidatorCount}; use sp_staking::SessionIndex; use sp_std::prelude::*; use sp_trie::{ - trie_types::{TrieDBBuilder, TrieDBMutBuilderV0}, - LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, + trie_types::{TrieDB, TrieDBMutV0}, + MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, }; use frame_support::{ @@ -236,7 +236,7 @@ impl ProvingTrie { let mut root = Default::default(); { - let mut trie = TrieDBMutBuilderV0::new(&mut db, &mut root).build(); + let mut trie = TrieDBMutV0::new(&mut db, &mut root); for (i, (validator, full_id)) in validators.into_iter().enumerate() { let i = i as u32; let keys = match >::load_keys(&validator) { @@ -278,20 +278,19 @@ impl ProvingTrie { /// Prove the full verification data for a given key and key ID. 
pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { - let mut recorder = Recorder::>::new(); - { - let trie = - TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build(); - let val_idx = (key_id, key_data).using_encoded(|s| { - trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok()) - })?; - - val_idx.using_encoded(|s| { - trie.get(s) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - })?; - } + let trie = TrieDB::new(&self.db, &self.root).ok()?; + let mut recorder = Recorder::new(); + let val_idx = (key_id, key_data).using_encoded(|s| { + trie.get_with(s, &mut recorder) + .ok()? + .and_then(|raw| u32::decode(&mut &*raw).ok()) + })?; + + val_idx.using_encoded(|s| { + trie.get_with(s, &mut recorder) + .ok()? + .and_then(|raw| >::decode(&mut &*raw).ok()) + })?; Some(recorder.drain().into_iter().map(|r| r.data).collect()) } @@ -304,7 +303,7 @@ impl ProvingTrie { // Check a proof contained within the current memory-db. Returns `None` if the // nodes within the current `MemoryDB` are insufficient to query the item. fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDBBuilder::new(&self.db, &self.root).build(); + let trie = TrieDB::new(&self.db, &self.root).ok()?; let val_idx = (key_id, key_data) .using_encoded(|s| trie.get(s)) .ok()? @@ -375,7 +374,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let keys: Vec<_> = NextValidators::get() - .iter() - .cloned() - .map(|i| (i, i, UintAuthorityId(i).into())) - .collect(); + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + }); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) 
in &keys { frame_system::Pallet::::inc_providers(k); diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index ececb8af5ad58..95813d0a70272 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -141,7 +141,7 @@ mod tests { use super::*; use crate::{ historical::{onchain, Pallet}, - mock::{force_new_session, set_next_validators, NextValidators, Session, System, Test}, + mock::{force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS}, }; use codec::Encode; @@ -163,12 +163,9 @@ mod tests { .build_storage::() .expect("Failed to create test externalities."); - let keys: Vec<_> = NextValidators::get() - .iter() - .cloned() - .map(|i| (i, i, UintAuthorityId(i).into())) - .collect(); - + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + }); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { frame_system::Pallet::::inc_providers(k); diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 7b97a20860175..71ee9d1e0758a 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -379,7 +379,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From + IsType<::RuntimeEvent>; + type Event: From + IsType<::Event>; /// A stable ID for a validator. type ValidatorId: Member @@ -574,7 +574,7 @@ pub mod pallet { // NOTE: the non-database part of the weight for `should_end_session(n)` is // included as weight for empty block, the database part is expected to be in // cache. 
- Weight::zero() + 0 } } } diff --git a/frame/session/src/migrations/v1.rs b/frame/session/src/migrations/v1.rs index c0dce422fe8b5..3c687ea7d9d66 100644 --- a/frame/session/src/migrations/v1.rs +++ b/frame/session/src/migrations/v1.rs @@ -47,7 +47,7 @@ pub fn migrate::on_chain_storage_version(); @@ -82,7 +82,7 @@ pub fn migrate = vec![1, 2, 3]; - pub static NextValidators: Vec = vec![1, 2, 3]; - pub static Authorities: Vec = - vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]; - pub static ForceSessionEnd: bool = false; - pub static SessionLength: u64 = 2; - pub static SessionChanged: bool = false; - pub static TestSessionChanged: bool = false; - pub static Disabled: bool = false; +thread_local! { + pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); + pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); + pub static AUTHORITIES: RefCell> = + RefCell::new(vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + pub static FORCE_SESSION_END: RefCell = RefCell::new(false); + pub static SESSION_LENGTH: RefCell = RefCell::new(2); + pub static SESSION_CHANGED: RefCell = RefCell::new(false); + pub static TEST_SESSION_CHANGED: RefCell = RefCell::new(false); + pub static DISABLED: RefCell = RefCell::new(false); // Stores if `on_before_session_end` was called - pub static BeforeSessionEndCalled: bool = false; - pub static ValidatorAccounts: BTreeMap = BTreeMap::new(); + pub static BEFORE_SESSION_END_CALLED: RefCell = RefCell::new(false); + pub static VALIDATOR_ACCOUNTS: RefCell> = RefCell::new(BTreeMap::new()); } pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { fn should_end_session(now: u64) -> bool { - let l = SessionLength::get(); + let l = SESSION_LENGTH.with(|l| *l.borrow()); now % l == 0 || - ForceSessionEnd::mutate(|l| { - let r = *l; - *l = false; + FORCE_SESSION_END.with(|l| { + let r = *l.borrow(); + *l.borrow_mut() = false; r }) } @@ -140,19 +140,19 @@ impl SessionHandler for 
TestSessionHandler { validators: &[(u64, T)], _queued_validators: &[(u64, T)], ) { - SessionChanged::mutate(|l| *l = changed); - Authorities::mutate(|l| { - *l = validators + SESSION_CHANGED.with(|l| *l.borrow_mut() = changed); + AUTHORITIES.with(|l| { + *l.borrow_mut() = validators .iter() .map(|(_, id)| id.get::(DUMMY).unwrap_or_default()) .collect() }); } fn on_disabled(_validator_index: u32) { - Disabled::mutate(|l| *l = true) + DISABLED.with(|l| *l.borrow_mut() = true) } fn on_before_session_ending() { - BeforeSessionEndCalled::mutate(|b| *b = true); + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = true); } } @@ -161,15 +161,16 @@ impl SessionManager for TestSessionManager { fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} fn new_session(_: SessionIndex) -> Option> { - if !TestSessionChanged::get() { - Validators::mutate(|v| { - *v = NextValidators::get().clone(); + if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { + VALIDATORS.with(|v| { + let mut v = v.borrow_mut(); + *v = NEXT_VALIDATORS.with(|l| l.borrow().clone()); Some(v.clone()) }) - } else if Disabled::mutate(|l| std::mem::replace(&mut *l, false)) { + } else if DISABLED.with(|l| std::mem::replace(&mut *l.borrow_mut(), false)) { // If there was a disabled validator, underlying conditions have changed // so we return `Some`. 
- Some(Validators::get().clone()) + Some(VALIDATORS.with(|v| v.borrow().clone())) } else { None } @@ -187,40 +188,37 @@ impl crate::historical::SessionManager for TestSessionManager { } pub fn authorities() -> Vec { - Authorities::get().to_vec() + AUTHORITIES.with(|l| l.borrow().to_vec()) } pub fn force_new_session() { - ForceSessionEnd::mutate(|l| *l = true) + FORCE_SESSION_END.with(|l| *l.borrow_mut() = true) } pub fn set_session_length(x: u64) { - SessionLength::mutate(|l| *l = x) + SESSION_LENGTH.with(|l| *l.borrow_mut() = x) } pub fn session_changed() -> bool { - SessionChanged::get() + SESSION_CHANGED.with(|l| *l.borrow()) } pub fn set_next_validators(next: Vec) { - NextValidators::mutate(|v| *v = next); + NEXT_VALIDATORS.with(|v| *v.borrow_mut() = next); } pub fn before_session_end_called() -> bool { - BeforeSessionEndCalled::get() + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow()) } pub fn reset_before_session_end_called() { - BeforeSessionEndCalled::mutate(|b| *b = false); + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = false); } pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let keys: Vec<_> = NextValidators::get() - .iter() - .cloned() - .map(|i| (i, i, UintAuthorityId(i).into())) - .collect(); + let keys: Vec<_> = NEXT_VALIDATORS + .with(|l| l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect()); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) 
in &keys { frame_system::Pallet::::inc_providers(k); @@ -232,15 +230,16 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pallet_session::GenesisConfig:: { keys } .assimilate_storage(&mut t) .unwrap(); - - let v = NextValidators::get().iter().map(|&i| (i, i)).collect(); - ValidatorAccounts::mutate(|m| *m = v); + NEXT_VALIDATORS.with(|l| { + let v = l.borrow().iter().map(|&i| (i, i)).collect(); + VALIDATOR_ACCOUNTS.with(|m| *m.borrow_mut() = v); + }); sp_io::TestExternalities::new(t) } parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { @@ -248,16 +247,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -280,12 +279,12 @@ impl pallet_timestamp::Config for Test { pub struct TestValidatorIdOf; impl TestValidatorIdOf { pub fn set(v: BTreeMap) { - ValidatorAccounts::mutate(|m| *m = v); + VALIDATOR_ACCOUNTS.with(|m| *m.borrow_mut() = v); } } impl Convert> for TestValidatorIdOf { fn convert(x: u64) -> Option { - ValidatorAccounts::get().get(&x).cloned() + VALIDATOR_ACCOUNTS.with(|m| m.borrow().get(&x).cloned()) } } @@ -299,7 +298,7 @@ impl Config for Test { type ValidatorId = u64; type ValidatorIdOf = TestValidatorIdOf; type Keys = MockSessionKeys; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type NextSessionRotation = (); type WeightInfo = (); } diff --git 
a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 43809cc3a9de0..c9d2dbb53d9ba 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::{ authorities, before_session_end_called, force_new_session, new_test_ext, reset_before_session_end_called, session_changed, set_next_validators, set_session_length, - PreUpgradeMockSessionKeys, RuntimeOrigin, Session, SessionChanged, System, Test, - TestSessionChanged, TestValidatorIdOf, + Origin, PreUpgradeMockSessionKeys, Session, System, Test, TestValidatorIdOf, SESSION_CHANGED, + TEST_SESSION_CHANGED, }; use codec::Decode; @@ -35,7 +35,7 @@ use frame_support::{ }; fn initialize_block(block: u64) { - SessionChanged::mutate(|l| *l = false); + SESSION_CHANGED.with(|l| *l.borrow_mut() = false); System::set_block_number(block); Session::on_initialize(block); } @@ -67,7 +67,7 @@ fn keys_cleared_on_kill() { assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); assert!(System::is_provider_required(&1)); - assert_ok!(Session::purge_keys(RuntimeOrigin::signed(1))); + assert_ok!(Session::purge_keys(Origin::signed(1))); assert!(!System::is_provider_required(&1)); assert_eq!(Session::load_keys(&1), None); @@ -87,8 +87,8 @@ fn purge_keys_works_for_stash_id() { let id = DUMMY; assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - assert_ok!(Session::purge_keys(RuntimeOrigin::signed(10))); - assert_ok!(Session::purge_keys(RuntimeOrigin::signed(2))); + assert_ok!(Session::purge_keys(Origin::signed(10))); + assert_ok!(Session::purge_keys(Origin::signed(2))); assert_eq!(Session::load_keys(&10), None); assert_eq!(Session::load_keys(&20), None); @@ -128,7 +128,7 @@ fn authorities_should_track_validators() { reset_before_session_end_called(); set_next_validators(vec![1, 2, 4]); - assert_ok!(Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(4).into(), vec![])); + assert_ok!(Session::set_keys(Origin::signed(4), 
UintAuthorityId(4).into(), vec![])); force_new_session(); initialize_block(3); assert_eq!( @@ -194,7 +194,7 @@ fn session_change_should_work() { // Block 3: Set new key for validator 2; no visible change. initialize_block(3); - assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), UintAuthorityId(5).into(), vec![])); + assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); // Block 4: Session rollover; no visible change. @@ -219,13 +219,13 @@ fn duplicates_are_not_allowed() { System::set_block_number(1); Session::on_initialize(1); assert_noop!( - Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(1).into(), vec![]), + Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]), Error::::DuplicatedKey, ); - assert_ok!(Session::set_keys(RuntimeOrigin::signed(1), UintAuthorityId(10).into(), vec![])); + assert_ok!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![])); // is fine now that 1 has migrated off. 
- assert_ok!(Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(1).into(), vec![])); + assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![])); }); } @@ -235,7 +235,7 @@ fn session_changed_flag_works() { new_test_ext().execute_with(|| { TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (69, 69)].into_iter().collect()); - TestSessionChanged::mutate(|l| *l = true); + TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); force_new_session(); initialize_block(1); @@ -268,7 +268,7 @@ fn session_changed_flag_works() { assert!(before_session_end_called()); reset_before_session_end_called(); - assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), UintAuthorityId(5).into(), vec![])); + assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); force_new_session(); initialize_block(6); assert!(!session_changed()); @@ -276,11 +276,7 @@ fn session_changed_flag_works() { reset_before_session_end_called(); // changing the keys of a validator leads to change. - assert_ok!(Session::set_keys( - RuntimeOrigin::signed(69), - UintAuthorityId(69).into(), - vec![] - )); + assert_ok!(Session::set_keys(Origin::signed(69), UintAuthorityId(69).into(), vec![])); force_new_session(); initialize_block(7); assert!(session_changed()); @@ -359,7 +355,7 @@ fn session_keys_generate_output_works_as_set_keys_input() { new_test_ext().execute_with(|| { let new_keys = mock::MockSessionKeys::generate(None); assert_ok!(Session::set_keys( - RuntimeOrigin::signed(2), + Origin::signed(2), ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), vec![], )); @@ -388,8 +384,8 @@ fn upgrade_keys() { use sp_core::crypto::key_types::DUMMY; // This test assumes certain mocks. 
- assert_eq!(mock::NextValidators::get().clone(), vec![1, 2, 3]); - assert_eq!(mock::Validators::get().clone(), vec![1, 2, 3]); + assert_eq!(mock::NEXT_VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + assert_eq!(mock::VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); new_test_ext().execute_with(|| { let pre_one = PreUpgradeMockSessionKeys { a: [1u8; 32], b: [1u8; 64] }; diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index d29413a33dd17..40ae7f1be4265 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_session //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/session/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/session/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,19 +55,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - // Minimum execution time: 59_046 nanoseconds. 
- Weight::from_ref_time(59_934_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (48_484_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - // Minimum execution time: 48_872 nanoseconds. - Weight::from_ref_time(49_666_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (38_003_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } } @@ -80,18 +75,16 @@ impl WeightInfo for () { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - // Minimum execution time: 59_046 nanoseconds. - Weight::from_ref_time(59_934_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (48_484_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - // Minimum execution time: 48_872 nanoseconds. 
- Weight::from_ref_time(49_666_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (38_003_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } } diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 73a09490ea579..2a6428e754b9d 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -281,7 +281,6 @@ type BalanceOf = type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A vote by a member on a candidate application. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] @@ -378,8 +377,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The societies's pallet id #[pallet::constant] @@ -421,10 +419,10 @@ pub mod pallet { type MaxLockDuration: Get; /// The origin that is allowed to call `found`. - type FounderSetOrigin: EnsureOrigin; + type FounderSetOrigin: EnsureOrigin; /// The origin that is allowed to make suspension judgements. - type SuspensionJudgementOrigin: EnsureOrigin; + type SuspensionJudgementOrigin: EnsureOrigin; /// The number of blocks between membership challenges. 
#[pallet::constant] @@ -615,7 +613,7 @@ pub mod pallet { fn on_initialize(n: T::BlockNumber) -> Weight { let mut members = vec![]; - let mut weight = Weight::zero(); + let mut weight = 0; let weights = T::BlockWeights::get(); // Run a candidate/membership rotation @@ -825,12 +823,11 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vouch( origin: OriginFor, - who: AccountIdLookupOf, + who: T::AccountId, value: BalanceOf, tip: BalanceOf, ) -> DispatchResult { let voucher = ensure_signed(origin)?; - let who = T::Lookup::lookup(who)?; // Check user is not suspended. ensure!(!>::contains_key(&who), Error::::Suspended); ensure!(!>::contains_key(&who), Error::::Suspended); @@ -917,7 +914,7 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vote( origin: OriginFor, - candidate: AccountIdLookupOf, + candidate: ::Source, approve: bool, ) -> DispatchResult { let voter = ensure_signed(origin)?; @@ -1029,12 +1026,11 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn found( origin: OriginFor, - founder: AccountIdLookupOf, + founder: T::AccountId, max_members: u32, rules: Vec, ) -> DispatchResult { T::FounderSetOrigin::ensure_origin(origin)?; - let founder = T::Lookup::lookup(founder)?; ensure!(!>::exists(), Error::::AlreadyFounded); ensure!(max_members > 1, Error::::MaxMembers); // This should never fail in the context of this function... 
@@ -1108,11 +1104,10 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_member( origin: OriginFor, - who: AccountIdLookupOf, + who: T::AccountId, forgive: bool, ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; ensure!(>::contains_key(&who), Error::::NotSuspended); if forgive { @@ -1185,11 +1180,10 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_candidate( origin: OriginFor, - who: AccountIdLookupOf, + who: T::AccountId, judgement: Judgement, ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; - let who = T::Lookup::lookup(who)?; if let Some((value, kind)) = >::get(&who) { match judgement { Judgement::Approve => { @@ -1268,19 +1262,19 @@ pub mod pallet { /// Simple ensure origin struct to filter for the founder account. pub struct EnsureFounder(sp_std::marker::PhantomData); -impl EnsureOrigin for EnsureFounder { +impl EnsureOrigin for EnsureFounder { type Success = T::AccountId; - fn try_origin(o: T::RuntimeOrigin) -> Result { + fn try_origin(o: T::Origin) -> Result { o.into().and_then(|o| match (o, Founder::::get()) { (frame_system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()), - (r, _) => Err(T::RuntimeOrigin::from(r)), + (r, _) => Err(T::Origin::from(r)), }) } #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { + fn try_successful_origin() -> Result { let founder = Founder::::get().ok_or(())?; - Ok(T::RuntimeOrigin::from(frame_system::RawOrigin::Signed(founder))) + Ok(T::Origin::from(frame_system::RawOrigin::Signed(founder))) } } diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 0b1b93aeae761..04ea705eed556 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { @@ -63,16 +63,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -90,7 +90,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -98,7 +98,7 @@ impl pallet_balances::Config for Test { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = pallet_balances::Pallet; type Randomness = TestRandomness; type CandidateDeposit = ConstU64<25>; diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs index 864735aa10cca..d394ddc9011b0 100644 --- a/frame/society/src/tests.rs +++ b/frame/society/src/tests.rs @@ -33,9 +33,9 @@ fn founding_works() { assert_eq!(Society::pot(), 0); // Account 1 is set as the founder origin // Account 5 cannot start a society - assert_noop!(Society::found(RuntimeOrigin::signed(5), 20, 100, vec![]), BadOrigin); + assert_noop!(Society::found(Origin::signed(5), 20, 100, vec![]), BadOrigin); // Account 1 can start a society, where 10 is the founding member - assert_ok!(Society::found(RuntimeOrigin::signed(1), 
10, 100, b"be cool".to_vec())); + assert_ok!(Society::found(Origin::signed(1), 10, 100, b"be cool".to_vec())); // Society members only include 10 assert_eq!(Society::members(), vec![10]); // 10 is the head of the society @@ -51,7 +51,7 @@ fn founding_works() { assert_eq!(Society::pot(), 1000); // Cannot start another society assert_noop!( - Society::found(RuntimeOrigin::signed(1), 20, 100, vec![]), + Society::found(Origin::signed(1), 20, 100, vec![]), Error::::AlreadyFounded ); }); @@ -61,22 +61,22 @@ fn founding_works() { fn unfounding_works() { EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { // Account 1 sets the founder... - assert_ok!(Society::found(RuntimeOrigin::signed(1), 10, 100, vec![])); + assert_ok!(Society::found(Origin::signed(1), 10, 100, vec![])); // Account 2 cannot unfound it as it's not the founder. - assert_noop!(Society::unfound(RuntimeOrigin::signed(2)), Error::::NotFounder); + assert_noop!(Society::unfound(Origin::signed(2)), Error::::NotFounder); // Account 10 can, though. - assert_ok!(Society::unfound(RuntimeOrigin::signed(10))); + assert_ok!(Society::unfound(Origin::signed(10))); // 1 sets the founder to 20 this time - assert_ok!(Society::found(RuntimeOrigin::signed(1), 20, 100, vec![])); + assert_ok!(Society::found(Origin::signed(1), 20, 100, vec![])); // Bring in a new member... - assert_ok!(Society::bid(RuntimeOrigin::signed(10), 0)); + assert_ok!(Society::bid(Origin::signed(10), 0)); run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(20), 10, true)); + assert_ok!(Society::vote(Origin::signed(20), 10, true)); run_to_block(8); // Unfounding won't work now, even though it's from 20. 
- assert_noop!(Society::unfound(RuntimeOrigin::signed(20)), Error::::NotHead); + assert_noop!(Society::unfound(Origin::signed(20)), Error::::NotHead); }); } @@ -85,7 +85,7 @@ fn basic_new_member_works() { EnvBuilder::new().execute(|| { assert_eq!(Balances::free_balance(20), 50); // Bid causes Candidate Deposit to be reserved. - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotate period every 4 blocks @@ -93,7 +93,7 @@ fn basic_new_member_works() { // 20 is now a candidate assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // 10 (a member) can vote for the candidate - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); // Rotate period every 4 blocks run_to_block(8); // 20 is now a member of the society @@ -108,10 +108,10 @@ fn basic_new_member_works() { fn bidding_works() { EnvBuilder::new().execute(|| { // Users make bids of various amounts - assert_ok!(Society::bid(RuntimeOrigin::signed(60), 1900)); - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 500)); - assert_ok!(Society::bid(RuntimeOrigin::signed(40), 400)); - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 300)); + assert_ok!(Society::bid(Origin::signed(60), 1900)); + assert_ok!(Society::bid(Origin::signed(50), 500)); + assert_ok!(Society::bid(Origin::signed(40), 400)); + assert_ok!(Society::bid(Origin::signed(30), 300)); // Rotate period run_to_block(4); // Pot is 1000 after "PeriodSpend" @@ -126,8 +126,8 @@ fn bidding_works() { ] ); // A member votes for these candidates to join the society - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 40, true)); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 40, true)); 
run_to_block(8); // Candidates become members after a period rotation assert_eq!(Society::members(), vec![10, 30, 40]); @@ -137,7 +137,7 @@ fn bidding_works() { // Left over from the original bids is 50 who satisfies the condition of bid less than pot. assert_eq!(Society::candidates(), vec![create_bid(500, 50, BidKind::Deposit(25))]); // 40, now a member, can vote for 50 - assert_ok!(Society::vote(RuntimeOrigin::signed(40), 50, true)); + assert_ok!(Society::vote(Origin::signed(40), 50, true)); run_to_block(12); // 50 is now a member assert_eq!(Society::members(), vec![10, 30, 40, 50]); @@ -146,8 +146,8 @@ fn bidding_works() { assert_eq!(Balances::free_balance(Society::account_id()), 8_800); // No more candidates satisfy the requirements assert_eq!(Society::candidates(), vec![]); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around - // Next period + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + // Next period run_to_block(16); // Same members assert_eq!(Society::members(), vec![10, 30, 40, 50]); @@ -158,7 +158,7 @@ fn bidding_works() { // Candidate 60 now qualifies based on the increased pot size. assert_eq!(Society::candidates(), vec![create_bid(1900, 60, BidKind::Deposit(25))]); // Candidate 60 is voted in. 
- assert_ok!(Society::vote(RuntimeOrigin::signed(50), 60, true)); + assert_ok!(Society::vote(Origin::signed(50), 60, true)); run_to_block(20); // 60 joins as a member assert_eq!(Society::members(), vec![10, 30, 40, 50, 60]); @@ -172,15 +172,15 @@ fn bidding_works() { fn unbidding_works() { EnvBuilder::new().execute(|| { // 20 and 30 make bids - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); + assert_ok!(Society::bid(Origin::signed(20), 1000)); + assert_ok!(Society::bid(Origin::signed(30), 0)); // Balances are reserved assert_eq!(Balances::free_balance(30), 25); assert_eq!(Balances::reserved_balance(30), 25); // Must know right position to unbid + cannot unbid someone else - assert_noop!(Society::unbid(RuntimeOrigin::signed(30), 1), Error::::BadPosition); + assert_noop!(Society::unbid(Origin::signed(30), 1), Error::::BadPosition); // Can unbid themselves with the right position - assert_ok!(Society::unbid(RuntimeOrigin::signed(30), 0)); + assert_ok!(Society::unbid(Origin::signed(30), 0)); // Balance is returned assert_eq!(Balances::free_balance(30), 50); assert_eq!(Balances::reserved_balance(30), 0); @@ -195,15 +195,15 @@ fn payout_works() { EnvBuilder::new().execute(|| { // Original balance of 50 assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); + assert_ok!(Society::bid(Origin::signed(20), 1000)); run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); run_to_block(8); // payout not ready - assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NoPayout); + assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); run_to_block(9); // payout should be here - assert_ok!(Society::payout(RuntimeOrigin::signed(20))); + assert_ok!(Society::payout(Origin::signed(20))); assert_eq!(Balances::free_balance(20), 1050); }); } @@ -212,7 +212,7 @@ 
fn payout_works() { fn basic_new_member_skeptic_works() { EnvBuilder::new().execute(|| { assert_eq!(Strikes::::get(10), 0); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); run_to_block(8); @@ -227,14 +227,14 @@ fn basic_new_member_reject_works() { // Starting Balance assert_eq!(Balances::free_balance(20), 50); // 20 makes a bid - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotation Period run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // We say no - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); + assert_ok!(Society::vote(Origin::signed(10), 20, false)); run_to_block(8); // User is not added as member assert_eq!(Society::members(), vec![10]); @@ -248,19 +248,19 @@ fn basic_new_member_reject_works() { fn slash_payout_works() { EnvBuilder::new().execute(|| { assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); + assert_ok!(Society::bid(Origin::signed(20), 1000)); run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); run_to_block(8); // payout in queue assert_eq!(Payouts::::get(20), vec![(9, 1000)]); - assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NoPayout); + assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); // slash payout assert_eq!(Society::slash_payout(&20, 500), 500); assert_eq!(Payouts::::get(20), vec![(9, 500)]); run_to_block(9); // payout should be here, but 500 less - assert_ok!(Society::payout(RuntimeOrigin::signed(20))); + assert_ok!(Society::payout(Origin::signed(20))); 
assert_eq!(Balances::free_balance(20), 550); }); } @@ -295,10 +295,10 @@ fn suspended_member_life_cycle_works() { assert_eq!(>::get(20), false); // Let's suspend account 20 by giving them 2 strikes by not voting - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); + assert_ok!(Society::bid(Origin::signed(30), 0)); run_to_block(8); assert_eq!(Strikes::::get(20), 1); - assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); + assert_ok!(Society::bid(Origin::signed(40), 0)); run_to_block(16); // Strike 2 is accumulated, and 20 is suspended :( @@ -307,17 +307,14 @@ fn suspended_member_life_cycle_works() { // Suspended members cannot get payout Society::bump_payout(&20, 10, 100); - assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NotMember); + assert_noop!(Society::payout(Origin::signed(20)), Error::::NotMember); // Normal people cannot make judgement - assert_noop!( - Society::judge_suspended_member(RuntimeOrigin::signed(20), 20, true), - BadOrigin - ); + assert_noop!(Society::judge_suspended_member(Origin::signed(20), 20, true), BadOrigin); // Suspension judgment origin can judge thee // Suspension judgement origin forgives the suspended member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, true)); + assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, true)); assert_eq!(>::get(20), false); assert_eq!(>::get(), vec![10, 20]); @@ -325,7 +322,7 @@ fn suspended_member_life_cycle_works() { Society::suspend_member(&20); assert_eq!(>::get(20), true); // Suspension judgement origin does not forgive the suspended member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); + assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); // Cleaned up assert_eq!(>::get(20), false); assert_eq!(>::get(), vec![10]); @@ -340,14 +337,14 @@ fn suspended_candidate_rejected_works() { assert_eq!(Balances::free_balance(20), 50); 
assert_eq!(Balances::free_balance(Society::account_id()), 10000); // 20 makes a bid - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotation Period run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // We say no - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); + assert_ok!(Society::vote(Origin::signed(10), 20, false)); run_to_block(8); // User is not added as member assert_eq!(Society::members(), vec![10]); @@ -357,22 +354,18 @@ fn suspended_candidate_rejected_works() { // Normal user cannot make judgement on suspended candidate assert_noop!( - Society::judge_suspended_candidate(RuntimeOrigin::signed(20), 20, Judgement::Approve), + Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), BadOrigin ); // Suspension judgement origin makes no direct judgement - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - 20, - Judgement::Rebid - )); + assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Rebid)); // They are placed back in bid pool, repeat suspension process // Rotation Period run_to_block(12); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // We say no - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); + assert_ok!(Society::vote(Origin::signed(10), 20, false)); run_to_block(16); // User is not added as member assert_eq!(Society::members(), vec![10]); @@ -381,11 +374,7 @@ fn suspended_candidate_rejected_works() { assert_eq!(Society::suspended_candidate(20).is_some(), true); // Suspension judgement origin rejects the candidate - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - 20, - Judgement::Reject - )); + assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, 
Judgement::Reject)); // User is slashed assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 0); @@ -403,16 +392,13 @@ fn vouch_works() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // A non-member cannot vouch - assert_noop!( - Society::vouch(RuntimeOrigin::signed(1), 20, 1000, 100), - Error::::NotMember - ); + assert_noop!(Society::vouch(Origin::signed(1), 20, 1000, 100), Error::::NotMember); // A member can though - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 1000, 100)); + assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // A member cannot vouch twice at the same time assert_noop!( - Society::vouch(RuntimeOrigin::signed(10), 30, 100, 0), + Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching ); // Vouching creates the right kind of bid @@ -421,7 +407,7 @@ fn vouch_works() { run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); // Vote yes - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); // Vouched user can win run_to_block(8); assert_eq!(Society::members(), vec![10, 20]); @@ -440,14 +426,14 @@ fn voucher_cannot_win_more_than_bid() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // 10 vouches, but asks for more than the bid - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 1000)); + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 1000)); // Vouching creates the right kind of bid assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); // Vouched user can become candidate run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); // Vote yes - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); // Vouched 
user can win run_to_block(8); assert_eq!(Society::members(), vec![10, 20]); @@ -464,25 +450,25 @@ fn unvouch_works() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // 10 vouches for 20 - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); // 20 has a bid assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); // 10 is vouched assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // To unvouch, you must know the right bid position - assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 2), Error::::BadPosition); + assert_noop!(Society::unvouch(Origin::signed(10), 2), Error::::BadPosition); // 10 can unvouch with the right position - assert_ok!(Society::unvouch(RuntimeOrigin::signed(10), 0)); + assert_ok!(Society::unvouch(Origin::signed(10), 0)); // 20 no longer has a bid assert_eq!(>::get(), vec![]); // 10 is no longer vouching assert_eq!(>::get(10), None); // Cannot unvouch after they become candidate - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); - assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 0), Error::::BadPosition); + assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::BadPosition); // 10 is still vouching until candidate is approved or rejected assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); run_to_block(8); @@ -492,22 +478,18 @@ fn unvouch_works() { // User is stuck vouching until judgement origin resolves suspended candidate assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // Judge denies candidate - assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), - 20, - Judgement::Reject - )); + assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Reject)); // 10 is banned from 
vouching assert_eq!(>::get(10), Some(VouchingStatus::Banned)); assert_eq!(Society::members(), vec![10]); // 10 cannot vouch again assert_noop!( - Society::vouch(RuntimeOrigin::signed(10), 30, 100, 0), + Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching ); // 10 cannot unvouch either, so they are banned forever. - assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 0), Error::::NotVouching); + assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::NotVouching); }); } @@ -517,13 +499,13 @@ fn unbid_vouch_works() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // 10 vouches for 20 - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); // 20 has a bid assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); // 10 is vouched assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // 20 doesn't want to be a member and can unbid themselves. - assert_ok!(Society::unbid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::unbid(Origin::signed(20), 0)); // Everything is cleaned up assert_eq!(>::get(10), None); assert_eq!(>::get(), vec![]); @@ -538,22 +520,22 @@ fn founder_and_head_cannot_be_removed() { assert_eq!(Society::founder(), Some(10)); assert_eq!(Society::head(), Some(10)); // 10 can still accumulate strikes - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); run_to_block(8); assert_eq!(Strikes::::get(10), 1); - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); + assert_ok!(Society::bid(Origin::signed(30), 0)); run_to_block(16); assert_eq!(Strikes::::get(10), 2); // Awkwardly they can obtain more than MAX_STRIKES... 
- assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); + assert_ok!(Society::bid(Origin::signed(40), 0)); run_to_block(24); assert_eq!(Strikes::::get(10), 3); // Replace the head - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 0)); + assert_ok!(Society::bid(Origin::signed(50), 0)); run_to_block(28); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around + assert_ok!(Society::vote(Origin::signed(10), 50, true)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around run_to_block(32); assert_eq!(Society::members(), vec![10, 50]); assert_eq!(Society::head(), Some(50)); @@ -561,29 +543,29 @@ fn founder_and_head_cannot_be_removed() { assert_eq!(Society::founder(), Some(10)); // 50 can still accumulate strikes - assert_ok!(Society::bid(RuntimeOrigin::signed(60), 0)); + assert_ok!(Society::bid(Origin::signed(60), 0)); run_to_block(40); assert_eq!(Strikes::::get(50), 1); - assert_ok!(Society::bid(RuntimeOrigin::signed(70), 0)); + assert_ok!(Society::bid(Origin::signed(70), 0)); run_to_block(48); assert_eq!(Strikes::::get(50), 2); // Replace the head - assert_ok!(Society::bid(RuntimeOrigin::signed(80), 0)); + assert_ok!(Society::bid(Origin::signed(80), 0)); run_to_block(52); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 80, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(50), 80, true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around + assert_ok!(Society::vote(Origin::signed(10), 80, true)); + assert_ok!(Society::vote(Origin::signed(50), 80, true)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around run_to_block(56); assert_eq!(Society::members(), vec![10, 50, 80]); assert_eq!(Society::head(), Some(80)); assert_eq!(Society::founder(), Some(10)); // 50 can now be suspended for strikes - 
assert_ok!(Society::bid(RuntimeOrigin::signed(90), 0)); + assert_ok!(Society::bid(Origin::signed(90), 0)); run_to_block(60); // The candidate is rejected, so voting approve will give a strike - assert_ok!(Society::vote(RuntimeOrigin::signed(50), 90, true)); + assert_ok!(Society::vote(Origin::signed(50), 90, true)); run_to_block(64); assert_eq!(Strikes::::get(50), 0); assert_eq!(>::get(50), true); @@ -610,22 +592,19 @@ fn challenges_work() { run_to_block(8); assert_eq!(Society::defender(), Some(30)); // They can always free vote for themselves - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), true)); // If no one else votes, nothing happens run_to_block(16); assert_eq!(Society::members(), vec![10, 20, 30, 40]); // New challenge period assert_eq!(Society::defender(), Some(30)); // Non-member cannot challenge - assert_noop!( - Society::defender_vote(RuntimeOrigin::signed(1), true), - Error::::NotMember - ); + assert_noop!(Society::defender_vote(Origin::signed(1), true), Error::::NotMember); // 3 people say accept, 1 reject - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(20), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(40), false)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); + assert_ok!(Society::defender_vote(Origin::signed(20), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), true)); + assert_ok!(Society::defender_vote(Origin::signed(40), false)); run_to_block(24); // 20 survives assert_eq!(Society::members(), vec![10, 20, 30, 40]); @@ -637,10 +616,10 @@ fn challenges_work() { // One more time assert_eq!(Society::defender(), Some(30)); // 2 people say accept, 2 reject - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); - 
assert_ok!(Society::defender_vote(RuntimeOrigin::signed(20), true)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), false)); - assert_ok!(Society::defender_vote(RuntimeOrigin::signed(40), false)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); + assert_ok!(Society::defender_vote(Origin::signed(20), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), false)); + assert_ok!(Society::defender_vote(Origin::signed(40), false)); run_to_block(32); // 20 is suspended assert_eq!(Society::members(), vec![10, 20, 40]); @@ -674,12 +653,12 @@ fn bad_vote_slash_works() { assert_eq!(>::get(30), vec![(5, 100)]); assert_eq!(>::get(40), vec![(5, 100)]); // Create a new bid - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 1000)); + assert_ok!(Society::bid(Origin::signed(50), 1000)); run_to_block(4); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, false)); - assert_ok!(Society::vote(RuntimeOrigin::signed(20), 50, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(30), 50, false)); - assert_ok!(Society::vote(RuntimeOrigin::signed(40), 50, false)); + assert_ok!(Society::vote(Origin::signed(10), 50, false)); + assert_ok!(Society::vote(Origin::signed(20), 50, true)); + assert_ok!(Society::vote(Origin::signed(30), 50, false)); + assert_ok!(Society::vote(Origin::signed(40), 50, false)); run_to_block(8); // Wrong voter gained a strike assert_eq!(>::get(10), 0); @@ -698,15 +677,15 @@ fn bad_vote_slash_works() { fn user_cannot_bid_twice() { EnvBuilder::new().execute(|| { // Cannot bid twice - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 100)); - assert_noop!(Society::bid(RuntimeOrigin::signed(20), 100), Error::::AlreadyBid); + assert_ok!(Society::bid(Origin::signed(20), 100)); + assert_noop!(Society::bid(Origin::signed(20), 100), Error::::AlreadyBid); // Cannot bid when vouched - assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 30, 100, 100)); - assert_noop!(Society::bid(RuntimeOrigin::signed(30), 100), 
Error::::AlreadyBid); + assert_ok!(Society::vouch(Origin::signed(10), 30, 100, 100)); + assert_noop!(Society::bid(Origin::signed(30), 100), Error::::AlreadyBid); // Cannot vouch when already bid assert_ok!(Society::add_member(&50)); assert_noop!( - Society::vouch(RuntimeOrigin::signed(50), 20, 100, 100), + Society::vouch(Origin::signed(50), 20, 100, 100), Error::::AlreadyBid ); }); @@ -718,7 +697,7 @@ fn vouching_handles_removed_member_with_bid() { // Add a member assert_ok!(Society::add_member(&20)); // Have that member vouch for a user - assert_ok!(Society::vouch(RuntimeOrigin::signed(20), 30, 1000, 100)); + assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); // That user is now a bid and the member is vouching assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); @@ -729,7 +708,7 @@ fn vouching_handles_removed_member_with_bid() { assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); // Remove member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); + assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); // Bid is removed, vouching status is removed assert_eq!(>::get(), vec![]); assert_eq!(>::get(20), None); @@ -742,7 +721,7 @@ fn vouching_handles_removed_member_with_candidate() { // Add a member assert_ok!(Society::add_member(&20)); // Have that member vouch for a user - assert_ok!(Society::vouch(RuntimeOrigin::signed(20), 30, 1000, 100)); + assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); // That user is now a bid and the member is vouching assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); @@ -756,12 +735,12 @@ fn vouching_handles_removed_member_with_candidate() { assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); 
assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); // Remove member - assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); + assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); // Vouching status is removed, but candidate is still in the queue assert_eq!(>::get(20), None); assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); // Candidate wins - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); run_to_block(8); assert_eq!(Society::members(), vec![10, 30]); // Payout does not go to removed member @@ -774,19 +753,16 @@ fn vouching_handles_removed_member_with_candidate() { fn votes_are_working() { EnvBuilder::new().execute(|| { // Users make bids of various amounts - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 500)); - assert_ok!(Society::bid(RuntimeOrigin::signed(40), 400)); - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 300)); + assert_ok!(Society::bid(Origin::signed(50), 500)); + assert_ok!(Society::bid(Origin::signed(40), 400)); + assert_ok!(Society::bid(Origin::signed(30), 300)); // Rotate period run_to_block(4); // A member votes for these candidates to join the society - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 40, true)); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 40, true)); // You cannot vote for a non-candidate - assert_noop!( - Society::vote(RuntimeOrigin::signed(10), 50, true), - Error::::NotCandidate - ); + assert_noop!(Society::vote(Origin::signed(10), 50, true), Error::::NotCandidate); // Votes are stored assert_eq!(>::get(30, 10), Some(Vote::Approve)); assert_eq!(>::get(40, 10), Some(Vote::Approve)); @@ -808,7 +784,7 @@ fn max_limits_work() { for i in (100..1110).rev() { // Give them some funds let _ = Balances::make_free_balance_be(&(i as 
u128), 1000); - assert_ok!(Society::bid(RuntimeOrigin::signed(i as u128), i)); + assert_ok!(Society::bid(Origin::signed(i as u128), i)); } let bids = >::get(); // Length is 1000 @@ -834,7 +810,7 @@ fn max_limits_work() { // Fill up members with suspended candidates from the first rotation for i in 100..104 { assert_ok!(Society::judge_suspended_candidate( - RuntimeOrigin::signed(2), + Origin::signed(2), i, Judgement::Approve )); @@ -845,9 +821,9 @@ fn max_limits_work() { // However, a fringe scenario allows for in-progress candidates to increase the membership // pool, but it has no real after-effects. for i in Society::members().iter() { - assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 110, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 111, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 112, true)); + assert_ok!(Society::vote(Origin::signed(*i), 110, true)); + assert_ok!(Society::vote(Origin::signed(*i), 111, true)); + assert_ok!(Society::vote(Origin::signed(*i), 112, true)); } // Rotate period run_to_block(12); @@ -856,7 +832,7 @@ fn max_limits_work() { // No candidates because full assert_eq!(Society::candidates().len(), 0); // Increase member limit - assert_ok!(Society::set_max_members(RuntimeOrigin::root(), 200)); + assert_ok!(Society::set_max_members(Origin::root(), 200)); // Rotate period run_to_block(16); // Candidates are back! @@ -871,11 +847,11 @@ fn zero_bid_works() { // * That zero bid is placed as head when accepted. 
EnvBuilder::new().execute(|| { // Users make bids of various amounts - assert_ok!(Society::bid(RuntimeOrigin::signed(60), 400)); - assert_ok!(Society::bid(RuntimeOrigin::signed(50), 300)); - assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); - assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); - assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); + assert_ok!(Society::bid(Origin::signed(60), 400)); + assert_ok!(Society::bid(Origin::signed(50), 300)); + assert_ok!(Society::bid(Origin::signed(30), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(40), 0)); // Rotate period run_to_block(4); @@ -896,9 +872,9 @@ fn zero_bid_works() { vec![create_bid(0, 20, BidKind::Deposit(25)), create_bid(0, 40, BidKind::Deposit(25)),] ); // A member votes for these candidates to join the society - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, true)); - assert_ok!(Society::vote(RuntimeOrigin::signed(10), 60, true)); + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 50, true)); + assert_ok!(Society::vote(Origin::signed(10), 60, true)); run_to_block(8); // Candidates become members after a period rotation assert_eq!(Society::members(), vec![10, 30, 50, 60]); @@ -916,7 +892,7 @@ fn bids_ordered_correctly() { for j in 0..5 { // Give them some funds let _ = Balances::make_free_balance_be(&(100 + (i * 5 + j) as u128), 1000); - assert_ok!(Society::bid(RuntimeOrigin::signed(100 + (i * 5 + j) as u128), j)); + assert_ok!(Society::bid(Origin::signed(100 + (i * 5 + j) as u128), j)); } } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index cf9e12dcd82b4..0950478fba089 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -43,7 +43,7 @@ sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elect pallet-balances = { version = "4.0.0-dev", path = 
"../balances" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } -pallet-bags-list = { version = "4.0.0-dev", path = "../bags-list" } +pallet-bags-list = { version = "4.0.0-dev", features = ["runtime-benchmarks"], path = "../bags-list" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } @@ -52,7 +52,6 @@ rand_chacha = { version = "0.2" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "serde", "codec/std", "scale-info/std", @@ -62,7 +61,6 @@ std = [ "sp-runtime/std", "sp-staking/std", "pallet-session/std", - "pallet-bags-list/std", "frame-system/std", "pallet-authorship/std", "sp-application-crypto/std", @@ -73,11 +71,6 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", "rand_chacha", - "sp-staking/runtime-benchmarks", - "pallet-bags-list/runtime-benchmarks", + "sp-staking/runtime-benchmarks" ] try-runtime = ["frame-support/try-runtime"] -fuzz = [ - "pallet-bags-list/fuzz", - "frame-election-provider-support/fuzz", -] diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index e1ea8aa7b15d5..e66f6fde37599 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -28,7 +28,7 @@ use syn::parse::{Parse, ParseStream}; /// Accepts a number of expressions to create a instance of PiecewiseLinear which represents the /// NPoS curve (as detailed -/// [here](https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model)) +/// [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model)) /// for those parameters. 
Parameters are: /// - `min_inflation`: the minimal amount to be rewarded between validators, expressed as a fraction /// of total issuance. Known as `I_0` in the literature. Expressed in millionth, must be between 0 diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index dcb861e2ce419..12de0ff9cc665 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -83,12 +83,11 @@ pub fn create_validator_with_nominators( let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; - let stash_lookup = T::Lookup::unlookup(v_stash.clone()); + let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); points_total += 10; points_individual.push((v_stash.clone(), 10)); - let original_nominator_count = Nominators::::count(); let mut nominators = Vec::new(); // Give the validator n nominators, but keep total users in the system the same. @@ -115,7 +114,7 @@ pub fn create_validator_with_nominators( assert_eq!(new_validators.len(), 1); assert_eq!(new_validators[0], v_stash, "Our validator was not selected!"); assert_ne!(Validators::::count(), 0); - assert_eq!(Nominators::::count(), original_nominator_count + nominators.len() as u32); + assert_ne!(Nominators::::count(), 0); // Give Era Points let reward = EraRewardPoints:: { @@ -218,7 +217,8 @@ benchmarks! { bond { let stash = create_funded_user::("stash", USER_SEED, 100); let controller = create_funded_user::("controller", USER_SEED, 100); - let controller_lookup = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source + = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; let amount = T::Currency::minimum_balance() * 10u32.into(); whitelist_account!(stash); @@ -365,7 +365,7 @@ benchmarks! 
{ 100, Default::default(), )?; - let stash_lookup = T::Lookup::unlookup(stash.clone()); + let stash_lookup: ::Source = T::Lookup::unlookup(stash.clone()); // they start validating. Staking::::validate(RawOrigin::Signed(controller.clone()).into(), Default::default())?; @@ -545,7 +545,7 @@ benchmarks! { } payout_stakers_dead_controller { - let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -578,7 +578,7 @@ benchmarks! { } payout_stakers_alive_staked { - let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -614,7 +614,7 @@ benchmarks! { } rebond { - let l in 1 .. T::MaxUnlockingChunks::get() as u32; + let l in 1 .. MaxUnlockingChunks::get() as u32; // clean up any existing state. clear_validators_and_nominators::(); @@ -661,6 +661,25 @@ benchmarks! { assert!(original_bonded < new_bonded); } + set_history_depth { + let e in 1 .. 100; + HistoryDepth::::put(e); + CurrentEra::::put(e); + let dummy = || -> T::AccountId { codec::Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap() }; + for i in 0 .. e { + >::insert(i, dummy(), Exposure::>::default()); + >::insert(i, dummy(), Exposure::>::default()); + >::insert(i, dummy(), ValidatorPrefs::default()); + >::insert(i, BalanceOf::::one()); + >::insert(i, EraRewardPoints::::default()); + >::insert(i, BalanceOf::::one()); + ErasStartSessionIndex::::insert(i, i); + } + }: _(RawOrigin::Root, EraIndex::zero(), u32::MAX) + verify { + assert_eq!(HistoryDepth::::get(), 0); + } + reap_stash { let s in 1 .. MAX_SPANS; // clean up any existing state. @@ -680,7 +699,7 @@ benchmarks! 
{ active: T::Currency::minimum_balance() - One::one(), total: T::Currency::minimum_balance() - One::one(), unlocking: Default::default(), - claimed_rewards: Default::default(), + claimed_rewards: vec![], }; Ledger::::insert(&controller, l); @@ -696,7 +715,7 @@ benchmarks! { new_era { let v in 1 .. 10; - let n in 0 .. 100; + let n in 1 .. 100; create_validators_with_nominators_for_era::( v, @@ -715,7 +734,7 @@ benchmarks! { #[extra] payout_all { let v in 1 .. 10; - let n in 0 .. 100; + let n in 1 .. 100; create_validators_with_nominators_for_era::( v, n, @@ -765,7 +784,7 @@ benchmarks! { #[extra] do_slash { - let l in 1 .. T::MaxUnlockingChunks::get() as u32; + let l in 1 .. MaxUnlockingChunks::get() as u32; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); let unlock_chunk = UnlockChunk::> { @@ -826,7 +845,7 @@ benchmarks! { v, n, T::MaxNominations::get() as usize, false, None )?; }: { - let targets = >::get_npos_targets(None); + let targets = >::get_npos_targets(); assert_eq!(targets.len() as u32, v); } @@ -935,7 +954,7 @@ benchmarks! 
{ #[cfg(test)] mod tests { use super::*; - use crate::mock::{Balances, ExtBuilder, RuntimeOrigin, Staking, Test}; + use crate::mock::{Balances, ExtBuilder, Origin, Staking, Test}; use frame_support::assert_ok; #[test] @@ -982,11 +1001,7 @@ mod tests { let current_era = CurrentEra::::get().unwrap(); let original_free_balance = Balances::free_balance(&validator_stash); - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), - validator_stash, - current_era - )); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), validator_stash, current_era)); let new_free_balance = Balances::free_balance(&validator_stash); assert!(original_free_balance < new_free_balance); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 0f5b8e0123ab6..ab0ab685e6911 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -299,17 +299,18 @@ pub mod weights; mod pallet; -use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; +use codec::{Decode, Encode, HasCompact}; use frame_support::{ + parameter_types, traits::{Currency, Defensive, Get}, weights::Weight, - BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, + BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_runtime::{ curve::PiecewiseLinear, - traits::{AtLeast32BitUnsigned, Convert, Saturating, StaticLookup, Zero}, - Perbill, Perquintill, Rounding, RuntimeDebug, + traits::{AtLeast32BitUnsigned, Convert, Saturating, Zero}, + Perbill, Perquintill, RuntimeDebug, }; use sp_staking::{ offence::{Offence, OffenceError, ReportOffence}, @@ -333,10 +334,6 @@ macro_rules! log { }; } -/// Maximum number of winners (aka. active validators), as defined in the election provider of this -/// pallet. -pub type MaxWinnersOf = <::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners; - /// Counter for the number of "reward" points earned by a given validator. 
pub type RewardPoint = u32; @@ -350,10 +347,12 @@ type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +parameter_types! { + pub MaxUnlockingChunks: u32 = 32; +} /// Information regarding the active era (era in used in session). -#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ActiveEraInfo { /// Index of era. pub index: EraIndex, @@ -394,7 +393,7 @@ pub enum StakerStatus { } /// A destination account for payment. -#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum RewardDestination { /// Pay into the stash account, increasing the amount at stake accordingly. Staked, @@ -415,7 +414,7 @@ impl Default for RewardDestination { } /// Preference of what happens regarding validation. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Default, MaxEncodedLen)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Default)] pub struct ValidatorPrefs { /// Reward that validator takes up-front; only the rest is split between themselves and /// nominators. @@ -428,8 +427,8 @@ pub struct ValidatorPrefs { } /// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct UnlockChunk { +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct UnlockChunk { /// Amount of funds to be unlocked. #[codec(compact)] value: Balance, @@ -439,16 +438,7 @@ pub struct UnlockChunk { } /// The ledger of a (bonded) stash. 
-#[derive( - PartialEqNoBound, - EqNoBound, - CloneNoBound, - Encode, - Decode, - RuntimeDebugNoBound, - TypeInfo, - MaxEncodedLen, -)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct StakingLedger { /// The stash account whose balance is actually locked and at stake. @@ -464,10 +454,10 @@ pub struct StakingLedger { /// Any balance that is becoming free, which may eventually be transferred out of the stash /// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first /// in, first out queue where the new (higher value) eras get pushed on the back. - pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, + pub unlocking: BoundedVec>, MaxUnlockingChunks>, /// List of eras for which the stakers behind a validator have claimed rewards. Only updated /// for validators. - pub claimed_rewards: BoundedVec, + pub claimed_rewards: Vec, } impl StakingLedger { @@ -478,7 +468,7 @@ impl StakingLedger { total: Zero::zero(), active: Zero::zero(), unlocking: Default::default(), - claimed_rewards: Default::default(), + claimed_rewards: vec![], } } @@ -562,7 +552,7 @@ impl StakingLedger { /// /// This calls `Config::OnStakerSlash::on_slash` with information as to how the slash was /// applied. 
- pub fn slash( + fn slash( &mut self, slash_amount: BalanceOf, minimum_balance: BalanceOf, @@ -572,7 +562,6 @@ impl StakingLedger { return Zero::zero() } - use sp_runtime::PerThing as _; use sp_staking::OnStakerSlash as _; let mut remaining_slash = slash_amount; let pre_slash_total = self.total; @@ -603,12 +592,7 @@ impl StakingLedger { } }); let affected_balance = self.active.saturating_add(unbonding_affected_balance); - let ratio = Perquintill::from_rational_with_rounding( - slash_amount, - affected_balance, - Rounding::Up, - ) - .unwrap_or_else(|_| Perquintill::one()); + let ratio = Perquintill::from_rational(slash_amount, affected_balance); ( Some(ratio), affected_indices.chain((0..first_slashable_index).rev()).collect::>(), @@ -632,7 +616,7 @@ impl StakingLedger { let mut slash_out_of = |target: &mut BalanceOf, slash_remaining: &mut BalanceOf| { let mut slash_from_target = if let Some(ratio) = maybe_proportional { - ratio.mul_ceil(*target) + ratio * (*target) } else { *slash_remaining } @@ -684,9 +668,7 @@ impl StakingLedger { } /// A record of the nominations made by a specific account. -#[derive( - PartialEqNoBound, EqNoBound, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, -)] +#[derive(PartialEqNoBound, EqNoBound, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo)] #[codec(mel_bound())] #[scale_info(skip_type_params(T))] pub struct Nominations { @@ -860,7 +842,7 @@ impl VoterBagsList - V12_0_0, // remove `HistoryDepth`. 
} impl Default for Releases { fn default() -> Self { - Releases::V11_0_0 + Releases::V10_0_0 } } @@ -952,9 +932,7 @@ where if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { R::report_offence(reporters, offence) } else { - >::deposit_event(Event::::OldSlashingReportDiscarded { - session_index: offence_session, - }); + >::deposit_event(Event::::OldSlashingReportDiscarded(offence_session)); Ok(()) } } diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index f2ccb4f8b096f..7e3bf6ccb93e1 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -20,155 +20,6 @@ use super::*; use frame_election_provider_support::SortedListProvider; use frame_support::traits::OnRuntimeUpgrade; -pub mod v12 { - use super::*; - use frame_support::{pallet_prelude::ValueQuery, storage_alias}; - - #[storage_alias] - type HistoryDepth = StorageValue, u32, ValueQuery>; - - /// Clean up `HistoryDepth` from storage. - /// - /// We will be depending on the configurable value of `HistoryDepth` post - /// this release. 
- pub struct MigrateToV12(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV12 { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - frame_support::ensure!( - StorageVersion::::get() == Releases::V11_0_0, - "Expected v11 before upgrading to v12" - ); - - if HistoryDepth::::exists() { - frame_support::ensure!( - T::HistoryDepth::get() == HistoryDepth::::get(), - "Provided value of HistoryDepth should be same as the existing storage value" - ); - } else { - log::info!("No HistoryDepth in storage; nothing to remove"); - } - - Ok(Default::default()) - } - - fn on_runtime_upgrade() -> frame_support::weights::Weight { - if StorageVersion::::get() == Releases::V11_0_0 { - HistoryDepth::::kill(); - StorageVersion::::put(Releases::V12_0_0); - - log!(info, "v12 applied successfully"); - T::DbWeight::get().reads_writes(1, 2) - } else { - log!(warn, "Skipping v12, should be removed"); - T::DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - frame_support::ensure!( - StorageVersion::::get() == crate::Releases::V12_0_0, - "v12 not applied" - ); - Ok(()) - } - } -} - -pub mod v11 { - use super::*; - use frame_support::{ - storage::migration::move_pallet, - traits::{GetStorageVersion, PalletInfoAccess}, - }; - #[cfg(feature = "try-runtime")] - use sp_io::hashing::twox_128; - - pub struct MigrateToV11(sp_std::marker::PhantomData<(T, P, N)>); - impl> OnRuntimeUpgrade - for MigrateToV11 - { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - frame_support::ensure!( - StorageVersion::::get() == crate::Releases::V10_0_0, - "must upgrade linearly" - ); - let old_pallet_prefix = twox_128(N::get().as_bytes()); - - frame_support::ensure!( - sp_io::storage::next_key(&old_pallet_prefix).is_some(), - "no data for the old pallet name has been detected" - ); - - Ok(Default::default()) - } - - /// Migrate the entire storage of this pallet 
to a new prefix. - /// - /// This new prefix must be the same as the one set in construct_runtime. For safety, use - /// `PalletInfo` to get it, as: - /// `::PalletInfo::name::`. - /// - /// The migration will look into the storage version in order to avoid triggering a - /// migration on an up to date storage. - fn on_runtime_upgrade() -> Weight { - let old_pallet_name = N::get(); - let new_pallet_name =

::name(); - - if StorageVersion::::get() == Releases::V10_0_0 { - // bump version anyway, even if we don't need to move the prefix - StorageVersion::::put(Releases::V11_0_0); - if new_pallet_name == old_pallet_name { - log!( - warn, - "new bags-list name is equal to the old one, only bumping the version" - ); - return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1)) - } - - move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes()); - ::BlockWeights::get().max_block - } else { - log!(warn, "v11::migrate should be removed."); - T::DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - frame_support::ensure!( - StorageVersion::::get() == crate::Releases::V11_0_0, - "wrong version after the upgrade" - ); - - let old_pallet_name = N::get(); - let new_pallet_name =

::name(); - - // skip storage prefix checks for the same pallet names - if new_pallet_name == old_pallet_name { - return Ok(()) - } - - let old_pallet_prefix = twox_128(N::get().as_bytes()); - frame_support::ensure!( - sp_io::storage::next_key(&old_pallet_prefix).is_none(), - "old pallet data hasn't been removed" - ); - - let new_pallet_name =

::name(); - let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); - frame_support::ensure!( - sp_io::storage::next_key(&new_pallet_prefix).is_some(), - "new pallet data hasn't been created" - ); - - Ok(()) - } - } -} - pub mod v10 { use super::*; use frame_support::storage_alias; @@ -210,10 +61,6 @@ pub mod v10 { pub mod v9 { use super::*; - #[cfg(feature = "try-runtime")] - use frame_support::codec::{Decode, Encode}; - #[cfg(feature = "try-runtime")] - use sp_std::vec::Vec; /// Migration implementation that injects all validators into sorted list. /// @@ -252,22 +99,23 @@ pub mod v9 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { + fn pre_upgrade() -> Result<(), &'static str> { + use frame_support::traits::OnRuntimeUpgradeHelpersExt; frame_support::ensure!( StorageVersion::::get() == crate::Releases::V8_0_0, "must upgrade linearly" ); let prev_count = T::VoterList::count(); - Ok(prev_count.encode()) + Self::set_temp_storage(prev_count, "prev"); + Ok(()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(prev_count: Vec) -> Result<(), &'static str> { - let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( - "the state parameter should be something that was generated by pre_upgrade", - ); + fn post_upgrade() -> Result<(), &'static str> { + use frame_support::traits::OnRuntimeUpgradeHelpersExt; let post_count = T::VoterList::count(); + let prev_count = Self::get_temp_storage::("prev").unwrap(); let validators = Validators::::count(); assert!(post_count == prev_count + validators); @@ -305,7 +153,7 @@ pub mod v8 { Nominators::::iter().map(|(id, _)| id), Pallet::::weight_of_fn(), ); - debug_assert_eq!(T::VoterList::try_state(), Ok(())); + debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); StorageVersion::::put(crate::Releases::V8_0_0); crate::log!( @@ -322,7 +170,7 @@ pub mod v8 { #[cfg(feature = "try-runtime")] pub fn post_migrate() -> Result<(), &'static str> { - T::VoterList::try_state().map_err(|_| 
"VoterList is not in a sane state.")?; + T::VoterList::sanity_check().map_err(|_| "VoterList is not in a sane state.")?; crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",); Ok(()) } diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 16e4e5ddd7aa2..d9dc97f9c1127 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -18,7 +18,9 @@ //! Test utilities use crate::{self as pallet_staking, *}; -use frame_election_provider_support::{onchain, SequentialPhragmen, VoteWeight}; +use frame_election_provider_support::{ + onchain, SequentialPhragmen, SortedListProvider, VoteWeight, +}; use frame_support::{ assert_ok, parameter_types, traits::{ @@ -35,6 +37,7 @@ use sp_runtime::{ traits::{IdentityLookup, Zero}, }; use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}; +use std::cell::RefCell; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -97,7 +100,7 @@ frame_support::construct_runtime!( Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, Historical: pallet_session::historical::{Pallet, Storage}, - VoterBagsList: pallet_bags_list::::{Pallet, Call, Storage, Event}, + BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, } ); @@ -129,16 +132,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -155,7 +158,7 @@ impl 
pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -172,7 +175,7 @@ impl pallet_session::Config for Test { type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = (OtherSessionHandler,); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; type NextSessionRotation = pallet_session::PeriodicSessions; @@ -213,16 +216,16 @@ parameter_types! { pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } -parameter_types! { - pub static RewardRemainderUnbalanced: u128 = 0; +thread_local! { + pub static REWARD_REMAINDER_UNBALANCED: RefCell = RefCell::new(0); } pub struct RewardRemainderMock; impl OnUnbalanced> for RewardRemainderMock { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { - RewardRemainderUnbalanced::mutate(|v| { - *v += amount.peek(); + REWARD_REMAINDER_UNBALANCED.with(|v| { + *v.borrow_mut() += amount.peek(); }); drop(amount); } @@ -234,18 +237,13 @@ const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = parameter_types! 
{ pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; pub static MaxNominations: u32 = 16; - pub static HistoryDepth: u32 = 80; - pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); - pub static MaxWinners: u32 = 100; } -type VoterBagsListInstance = pallet_bags_list::Instance1; -impl pallet_bags_list::Config for Test { - type RuntimeEvent = RuntimeEvent; +impl pallet_bags_list::Config for Test { + type Event = Event; type WeightInfo = (); - // Staking is the source of truth for voter bags list, since they are not kept up to date. type ScoreProvider = Staking; type BagThresholds = BagThresholds; type Score = VoteWeight; @@ -257,9 +255,6 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = MaxWinners; - type VotersBound = ConstU32<{ u32::MAX }>; - type TargetsBound = ConstU32<{ u32::MAX }>; } pub struct MockReward {} @@ -287,7 +282,7 @@ impl crate::pallet::pallet::Config for Test { type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = RewardRemainderMock; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Slash = (); type Reward = MockReward; type SessionsPerEra = SessionsPerEra; @@ -299,20 +294,18 @@ impl crate::pallet::pallet::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; - type ElectionProvider = onchain::OnChainExecution; + type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. 
- type VoterList = VoterBagsList; - type TargetList = UseValidatorsMap; - type MaxUnlockingChunks = MaxUnlockingChunks; - type HistoryDepth = HistoryDepth; + type VoterList = BagsList; + type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = OnStakerSlashMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); } pub(crate) type StakingCall = crate::Call; -pub(crate) type TestCall = ::RuntimeCall; +pub(crate) type TestRuntimeCall = ::Call; pub struct ExtBuilder { nominate: bool, @@ -550,12 +543,110 @@ impl ExtBuilder { sp_tracing::try_init_simple(); let mut ext = self.build(); ext.execute_with(test); - ext.execute_with(|| { - Staking::do_try_state(System::block_number()).unwrap(); - }); + ext.execute_with(post_conditions); } } +fn post_conditions() { + check_nominators(); + check_exposures(); + check_ledgers(); + check_count(); +} + +fn check_count() { + let nominator_count = Nominators::::iter_keys().count() as u32; + let validator_count = Validators::::iter().count() as u32; + assert_eq!(nominator_count, Nominators::::count()); + assert_eq!(validator_count, Validators::::count()); + + // the voters that the `VoterList` list is storing for us. + let external_voters = ::VoterList::count(); + assert_eq!(external_voters, nominator_count + validator_count); +} + +fn check_ledgers() { + // check the ledger of all stakers. + Bonded::::iter().for_each(|(_, ctrl)| assert_ledger_consistent(ctrl)) +} + +fn check_exposures() { + // a check per validator to ensure the exposure struct is always sane. + let era = active_era(); + ErasStakers::::iter_prefix_values(era).for_each(|expo| { + assert_eq!( + expo.total as u128, + expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), + "wrong total exposure.", + ); + }) +} + +fn check_nominators() { + // a check per nominator to ensure their entire stake is correctly distributed. Will only kick- + // in if the nomination was submitted before the current era. 
+ let era = active_era(); + >::iter() + .filter_map( + |(nominator, nomination)| { + if nomination.submitted_in > era { + Some(nominator) + } else { + None + } + }, + ) + .for_each(|nominator| { + // must be bonded. + assert_is_stash(nominator); + let mut sum = 0; + Session::validators() + .iter() + .map(|v| Staking::eras_stakers(era, v)) + .for_each(|e| { + let individual = + e.others.iter().filter(|e| e.who == nominator).collect::>(); + let len = individual.len(); + match len { + 0 => { /* not supporting this validator at all. */ }, + 1 => sum += individual[0].value, + _ => panic!("nominator cannot back a validator more than once."), + }; + }); + + let nominator_stake = Staking::slashable_balance_of(&nominator); + // a nominator cannot over-spend. + assert!( + nominator_stake >= sum, + "failed: Nominator({}) stake({}) >= sum divided({})", + nominator, + nominator_stake, + sum, + ); + + let diff = nominator_stake - sum; + assert!(diff < 100); + }); +} + +fn assert_is_stash(acc: AccountId) { + assert!(Staking::bonded(&acc).is_some(), "Not a stash."); +} + +fn assert_ledger_consistent(ctrl: AccountId) { + // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
+ let ledger = Staking::ledger(ctrl).expect("Not a controller."); + let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); + assert_eq!(real_total, ledger.total); + assert!( + ledger.active >= Balances::minimum_balance() || ledger.active == 0, + "{}: active ledger amount ({}) must be greater than ED {}", + ctrl, + ledger.active, + Balances::minimum_balance() + ); +} + pub(crate) fn active_era() -> EraIndex { Staking::active_era().unwrap().index } @@ -567,22 +658,13 @@ pub(crate) fn current_era() -> EraIndex { pub(crate) fn bond(stash: AccountId, ctrl: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(stash), - ctrl, - val, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller)); } pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { bond(stash, ctrl, val); - assert_ok!(Staking::validate(RuntimeOrigin::signed(ctrl), ValidatorPrefs::default())); - assert_ok!(Session::set_keys( - RuntimeOrigin::signed(ctrl), - SessionKeys { other: ctrl.into() }, - vec![] - )); + assert_ok!(Staking::validate(Origin::signed(ctrl), ValidatorPrefs::default())); + assert_ok!(Session::set_keys(Origin::signed(ctrl), SessionKeys { other: ctrl.into() }, vec![])); } pub(crate) fn bond_nominator( @@ -592,7 +674,7 @@ pub(crate) fn bond_nominator( target: Vec, ) { bond(stash, ctrl, val); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(ctrl), target)); + assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); } /// Progress to the given block, triggering session and era changes as we progress. 
@@ -744,7 +826,7 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), *who)), + offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -762,7 +844,7 @@ pub(crate) fn make_all_reward_payment(era: EraIndex) { // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), ledger.stash, era)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); } } @@ -790,7 +872,7 @@ pub(crate) fn staking_events() -> Vec> { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::Staking(inner) = e { Some(inner) } else { None }) .collect() } @@ -801,7 +883,7 @@ parameter_types! { pub(crate) fn staking_events_since_last_call() -> Vec> { let all: Vec<_> = System::events() .into_iter() - .filter_map(|r| if let RuntimeEvent::Staking(inner) = r.event { Some(inner) } else { None }) + .filter_map(|r| if let Event::Staking(inner) = r.event { Some(inner) } else { None }) .collect(); let seen = StakingEventsIndex::get(); StakingEventsIndex::set(all.len()); diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 9be01dd823104..68aa97db8a324 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -18,17 +18,16 @@ //! Implementations for the Staking FRAME Pallet. 
use frame_election_provider_support::{ - data_provider, BoundedSupportsOf, ElectionDataProvider, ElectionProvider, ScoreProvider, - SortedListProvider, VoteWeight, VoterOf, + data_provider, ElectionDataProvider, ElectionProvider, ScoreProvider, SortedListProvider, + Supports, VoteWeight, VoterOf, }; use frame_support::{ - dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, CurrencyToVote, Defensive, DefensiveResult, EstimateNextNewSession, Get, - Imbalance, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, WithdrawReasons, + Currency, CurrencyToVote, Defensive, EstimateNextNewSession, Get, Imbalance, + LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, }, - weights::Weight, + weights::{Weight, WithPostDispatchInfo}, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::historical; @@ -38,13 +37,13 @@ use sp_runtime::{ }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, - EraIndex, SessionIndex, Stake, StakingInterface, + EraIndex, SessionIndex, StakingInterface, }; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use crate::{ log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, Exposure, ExposureOf, - Forcing, IndividualExposure, MaxWinnersOf, Nominations, PositiveImbalanceOf, RewardDestination, + Forcing, IndividualExposure, Nominations, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; @@ -101,7 +100,7 @@ impl Pallet { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) })?; - let history_depth = T::HistoryDepth::get(); + let history_depth = Self::history_depth(); ensure!( era <= current_era && era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward @@ -123,18 +122,11 @@ impl Pallet { ledger .claimed_rewards .retain(|&x| x >= current_era.saturating_sub(history_depth)); - match ledger.claimed_rewards.binary_search(&era) { Ok(_) => return 
Err(Error::::AlreadyClaimed .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))), - Err(pos) => ledger - .claimed_rewards - .try_insert(pos, era) - // Since we retain era entries in `claimed_rewards` only upto - // `HistoryDepth`, following bound is always expected to be - // satisfied. - .defensive_map_err(|_| Error::::BoundNotMet)?, + Err(pos) => ledger.claimed_rewards.insert(pos, era), } let exposure = >::get(&era, &ledger.stash); @@ -181,20 +173,14 @@ impl Pallet { let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; - Self::deposit_event(Event::::PayoutStarted { - era_index: era, - validator_stash: ledger.stash.clone(), - }); + Self::deposit_event(Event::::PayoutStarted(era, ledger.stash.clone())); let mut total_imbalance = PositiveImbalanceOf::::zero(); // We can now make total validator payout: if let Some(imbalance) = Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) { - Self::deposit_event(Event::::Rewarded { - stash: ledger.stash, - amount: imbalance.peek(), - }); + Self::deposit_event(Event::::Rewarded(ledger.stash, imbalance.peek())); total_imbalance.subsume(imbalance); } @@ -214,8 +200,7 @@ impl Pallet { if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. 
nominator_payout_count += 1; - let e = - Event::::Rewarded { stash: nominator.who.clone(), amount: imbalance.peek() }; + let e = Event::::Rewarded(nominator.who.clone(), imbalance.peek()); Self::deposit_event(e); total_imbalance.subsume(imbalance); } @@ -239,7 +224,7 @@ impl Pallet { let chilled_as_validator = Self::do_remove_validator(stash); let chilled_as_nominator = Self::do_remove_nominator(stash); if chilled_as_validator || chilled_as_nominator { - Self::deposit_event(Event::::Chilled { stash: stash.clone() }); + Self::deposit_event(Event::::Chilled(stash.clone())); } } @@ -267,10 +252,7 @@ impl Pallet { } /// Plan a new session potentially trigger a new era. - fn new_session( - session_index: SessionIndex, - is_genesis: bool, - ) -> Option>> { + fn new_session(session_index: SessionIndex, is_genesis: bool) -> Option> { if let Some(current_era) = Self::current_era() { // Initial era has been set. let current_era_start_session_index = Self::eras_start_session_index(current_era) @@ -401,18 +383,13 @@ impl Pallet { let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); let staked = Self::eras_total_stake(&active_era.index); let issuance = T::Currency::total_issuance(); - let (validator_payout, remainder) = - T::EraPayout::era_payout(staked, issuance, era_duration); + let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); - Self::deposit_event(Event::::EraPaid { - era_index: active_era.index, - validator_payout, - remainder, - }); + Self::deposit_event(Event::::EraPaid(active_era.index, validator_payout, rest)); // Set ending era reward. >::insert(&active_era.index, validator_payout); - T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); + T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); // Clear offending validators. >::kill(); @@ -429,11 +406,8 @@ impl Pallet { /// Returns the new validator set. 
pub fn trigger_new_era( start_session_index: SessionIndex, - exposures: BoundedVec< - (T::AccountId, Exposure>), - MaxWinnersOf, - >, - ) -> BoundedVec> { + exposures: Vec<(T::AccountId, Exposure>)>, + ) -> Vec { // Increment or set current era. let new_planned_era = CurrentEra::::mutate(|s| { *s = Some(s.map(|s| s + 1).unwrap_or(0)); @@ -442,7 +416,7 @@ impl Pallet { ErasStartSessionIndex::::insert(&new_planned_era, &start_session_index); // Clean old era information. - if let Some(old_era) = new_planned_era.checked_sub(T::HistoryDepth::get() + 1) { + if let Some(old_era) = new_planned_era.checked_sub(Self::history_depth() + 1) { Self::clear_era_information(old_era); } @@ -459,26 +433,19 @@ impl Pallet { pub(crate) fn try_trigger_new_era( start_session_index: SessionIndex, is_genesis: bool, - ) -> Option>> { - let election_result: BoundedVec<_, MaxWinnersOf> = if is_genesis { - let result = ::elect().map_err(|e| { + ) -> Option> { + let election_result = if is_genesis { + T::GenesisElectionProvider::elect().map_err(|e| { log!(warn, "genesis election provider failed due to {:?}", e); Self::deposit_event(Event::StakingElectionFailed); - }); - - result - .ok()? - .into_inner() - .try_into() - // both bounds checked in integrity test to be equal - .defensive_unwrap_or_default() + }) } else { - let result = ::elect().map_err(|e| { + T::ElectionProvider::elect().map_err(|e| { log!(warn, "election provider failed due to {:?}", e); Self::deposit_event(Event::StakingElectionFailed); - }); - result.ok()? 
- }; + }) + } + .ok()?; let exposures = Self::collect_exposures(election_result); if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { @@ -515,19 +482,10 @@ impl Pallet { /// /// Store staking information for the new planned era pub fn store_stakers_info( - exposures: BoundedVec< - (T::AccountId, Exposure>), - MaxWinnersOf, - >, + exposures: Vec<(T::AccountId, Exposure>)>, new_planned_era: EraIndex, - ) -> BoundedVec> { - let elected_stashes: BoundedVec<_, MaxWinnersOf> = exposures - .iter() - .cloned() - .map(|(x, _)| x) - .collect::>() - .try_into() - .expect("since we only map through exposures, size of elected_stashes is always same as exposures; qed"); + ) -> Vec { + let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); // Populate stakers, exposures, and the snapshot of validator prefs. let mut total_stake: BalanceOf = Zero::zero(); @@ -565,11 +523,11 @@ impl Pallet { elected_stashes } - /// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a + /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a /// [`Exposure`]. fn collect_exposures( - supports: BoundedSupportsOf, - ) -> BoundedVec<(T::AccountId, Exposure>), MaxWinnersOf> { + supports: Supports, + ) -> Vec<(T::AccountId, Exposure>)> { let total_issuance = T::Currency::total_issuance(); let to_currency = |e: frame_election_provider_support::ExtendedBalance| { T::CurrencyToVote::to_currency(e, total_issuance) @@ -598,8 +556,7 @@ impl Pallet { let exposure = Exposure { own, others, total }; (validator, exposure) }) - .try_collect() - .expect("we only map through support vector which cannot change the size; qed") + .collect::)>>() } /// Remove all associated data of a stash account from the staking system. 
@@ -688,10 +645,10 @@ impl Pallet { #[cfg(feature = "runtime-benchmarks")] pub fn add_era_stakers( current_era: EraIndex, - stash: T::AccountId, + controller: T::AccountId, exposure: Exposure>, ) { - >::insert(¤t_era, &stash, &exposure); + >::insert(¤t_era, &controller, &exposure); } #[cfg(feature = "runtime-benchmarks")] @@ -798,32 +755,18 @@ impl Pallet { /// Get the targets for an upcoming npos election. /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. - pub fn get_npos_targets(maybe_max_len: Option) -> Vec { - let max_allowed_len = maybe_max_len.unwrap_or_else(|| T::TargetList::count() as usize); - let mut all_targets = Vec::::with_capacity(max_allowed_len); - let mut targets_seen = 0; - - let mut targets_iter = T::TargetList::iter(); - while all_targets.len() < max_allowed_len && - targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) - { - let target = match targets_iter.next() { - Some(target) => { - targets_seen.saturating_inc(); - target - }, - None => break, - }; - - if Validators::::contains_key(&target) { - all_targets.push(target); - } - } + pub fn get_npos_targets() -> Vec { + let mut validator_count = 0u32; + let targets = Validators::::iter() + .map(|(v, _)| { + validator_count.saturating_inc(); + v + }) + .collect::>(); - Self::register_weight(T::WeightInfo::get_npos_targets(all_targets.len() as u32)); - log!(info, "generated {} npos targets", all_targets.len()); + Self::register_weight(T::WeightInfo::get_npos_targets(validator_count)); - all_targets + targets } /// This function will add a nominator to the `Nominators` storage map, @@ -846,6 +789,7 @@ impl Pallet { Nominators::::count() + Validators::::count(), T::VoterList::count() ); + debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); } /// This function will remove a nominator from the `Nominators` storage map, @@ -865,6 +809,7 @@ impl Pallet { false }; + debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); debug_assert_eq!( 
Nominators::::count() + Validators::::count(), T::VoterList::count() @@ -892,6 +837,7 @@ impl Pallet { Nominators::::count() + Validators::::count(), T::VoterList::count() ); + debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); } /// This function will remove a validator from the `Validators` storage map. @@ -910,6 +856,7 @@ impl Pallet { false }; + debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); debug_assert_eq!( Nominators::::count() + Validators::::count(), T::VoterList::count() @@ -955,7 +902,7 @@ impl ElectionDataProvider for Pallet { return Err("Target snapshot too big") } - Ok(Self::get_npos_targets(None)) + Ok(Self::get_npos_targets()) } fn next_election_prediction(now: T::BlockNumber) -> T::BlockNumber { @@ -1008,7 +955,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: Default::default(), + claimed_rewards: vec![], }, ); @@ -1026,7 +973,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: Default::default(), + claimed_rewards: vec![], }, ); Self::do_add_validator( @@ -1067,7 +1014,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: Default::default(), + claimed_rewards: vec![], }, ); Self::do_add_validator( @@ -1088,7 +1035,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: Default::default(), + claimed_rewards: vec![], }, ); Self::do_add_nominator( @@ -1108,12 +1055,12 @@ impl pallet_session::SessionManager for Pallet { fn new_session(new_index: SessionIndex) -> Option> { log!(trace, "planning new session {}", new_index); CurrentPlannedSession::::put(new_index); - Self::new_session(new_index, false).map(|v| v.into_inner()) + Self::new_session(new_index, false) } fn new_session_genesis(new_index: SessionIndex) -> Option> { log!(trace, "planning new session {} at 
genesis", new_index); CurrentPlannedSession::::put(new_index); - Self::new_session(new_index, true).map(|v| v.into_inner()) + Self::new_session(new_index, true) } fn start_session(start_index: SessionIndex) { log!(trace, "starting session {}", start_index); @@ -1220,7 +1167,7 @@ where disable_strategy: DisableStrategy, ) -> Weight { let reward_proportion = SlashRewardFraction::::get(); - let mut consumed_weight = Weight::from_ref_time(0); + let mut consumed_weight: Weight = 0; let mut add_db_reads_writes = |reads, writes| { consumed_weight += T::DbWeight::get().reads_writes(reads, writes); }; @@ -1338,7 +1285,7 @@ impl ScoreProvider for Pallet { Self::weight_of(who) } - #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz"))] + #[cfg(feature = "runtime-benchmarks")] fn set_score_of(who: &T::AccountId, weight: Self::Score) { // this will clearly results in an inconsistent state, but it should not matter for a // benchmark. @@ -1362,70 +1309,6 @@ impl ScoreProvider for Pallet { } } -/// A simple sorted list implementation that does not require any additional pallets. Note, this -/// does not provide validators in sorted order. If you desire nominators in a sorted order take -/// a look at [`pallet-bags-list`]. -pub struct UseValidatorsMap(sp_std::marker::PhantomData); -impl SortedListProvider for UseValidatorsMap { - type Score = BalanceOf; - type Error = (); - - /// Returns iterator over voter list, which can have `take` called on it. 
- fn iter() -> Box> { - Box::new(Validators::::iter().map(|(v, _)| v)) - } - fn iter_from( - start: &T::AccountId, - ) -> Result>, Self::Error> { - if Validators::::contains_key(start) { - let start_key = Validators::::hashed_key_for(start); - Ok(Box::new(Validators::::iter_from(start_key).map(|(n, _)| n))) - } else { - Err(()) - } - } - fn count() -> u32 { - Validators::::count() - } - fn contains(id: &T::AccountId) -> bool { - Validators::::contains_key(id) - } - fn on_insert(_: T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { - // nothing to do on insert. - Ok(()) - } - fn get_score(id: &T::AccountId) -> Result { - Ok(Pallet::::weight_of(id).into()) - } - fn on_update(_: &T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { - // nothing to do on update. - Ok(()) - } - fn on_remove(_: &T::AccountId) -> Result<(), Self::Error> { - // nothing to do on remove. - Ok(()) - } - fn unsafe_regenerate( - _: impl IntoIterator, - _: Box Self::Score>, - ) -> u32 { - // nothing to do upon regenerate. - 0 - } - fn try_state() -> Result<(), &'static str> { - Ok(()) - } - fn unsafe_clear() { - #[allow(deprecated)] - Validators::::remove_all(); - } - - #[cfg(feature = "runtime-benchmarks")] - fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { - unimplemented!() - } -} - /// A simple voter list implementation that does not require any additional pallets. Note, this /// does not provided nominators in sorted ordered. If you desire nominators in a sorted order take /// a look at [`pallet-bags-list]. @@ -1486,7 +1369,7 @@ impl SortedListProvider for UseNominatorsAndValidatorsM // nothing to do upon regenerate. 
0 } - fn try_state() -> Result<(), &'static str> { + fn sanity_check() -> Result<(), &'static str> { Ok(()) } @@ -1498,51 +1381,16 @@ impl SortedListProvider for UseNominatorsAndValidatorsM #[allow(deprecated)] Validators::::remove_all(); } - - #[cfg(feature = "runtime-benchmarks")] - fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { - unimplemented!() - } } -// NOTE: in this entire impl block, the assumption is that `who` is a stash account. impl StakingInterface for Pallet { type AccountId = T::AccountId; type Balance = BalanceOf; - fn minimum_nominator_bond() -> Self::Balance { + fn minimum_bond() -> Self::Balance { MinNominatorBond::::get() } - fn minimum_validator_bond() -> Self::Balance { - MinValidatorBond::::get() - } - - fn desired_validator_count() -> u32 { - ValidatorCount::::get() - } - - fn election_ongoing() -> bool { - T::ElectionProvider::ongoing() - } - - fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { - let num_slashing_spans = Self::slashing_spans(&who).iter().count() as u32; - Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) - } - - fn stash_by_ctrl(controller: &Self::AccountId) -> Result { - Self::ledger(controller) - .map(|l| l.stash) - .ok_or(Error::::NotController.into()) - } - - fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { - ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { - validator == *who || exposures.others.iter().any(|i| i.who == *who) - }) - } - fn bonding_duration() -> EraIndex { T::BondingDuration::get() } @@ -1551,191 +1399,56 @@ impl StakingInterface for Pallet { Self::current_era().unwrap_or(Zero::zero()) } - fn stake(who: &Self::AccountId) -> Result, DispatchError> { - Self::bonded(who) - .and_then(|c| Self::ledger(c)) - .map(|l| Stake { stash: l.stash, total: l.total, active: l.active }) - .ok_or(Error::::NotStash.into()) + fn active_stake(controller: &Self::AccountId) -> Option { + 
Self::ledger(controller).map(|l| l.active) + } + + fn total_stake(controller: &Self::AccountId) -> Option { + Self::ledger(controller).map(|l| l.total) } - fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { - Self::bond_extra(RawOrigin::Signed(who.clone()).into(), extra) + fn bond_extra(stash: Self::AccountId, extra: Self::Balance) -> DispatchResult { + Self::bond_extra(RawOrigin::Signed(stash).into(), extra) } - fn unbond(who: &Self::AccountId, value: Self::Balance) -> DispatchResult { - let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; - Self::unbond(RawOrigin::Signed(ctrl).into(), value) + fn unbond(controller: Self::AccountId, value: Self::Balance) -> DispatchResult { + Self::unbond(RawOrigin::Signed(controller).into(), value) } - fn chill(who: &Self::AccountId) -> DispatchResult { - // defensive-only: any account bonded via this interface has the stash set as the - // controller, but we have to be sure. Same comment anywhere else that we read this. 
- let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; - Self::chill(RawOrigin::Signed(ctrl).into()) + fn chill(controller: Self::AccountId) -> DispatchResult { + Self::chill(RawOrigin::Signed(controller).into()) } fn withdraw_unbonded( - who: Self::AccountId, + controller: Self::AccountId, num_slashing_spans: u32, ) -> Result { - let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; - Self::withdraw_unbonded(RawOrigin::Signed(ctrl.clone()).into(), num_slashing_spans) - .map(|_| !Ledger::::contains_key(&ctrl)) + Self::withdraw_unbonded(RawOrigin::Signed(controller.clone()).into(), num_slashing_spans) + .map(|_| !Ledger::::contains_key(&controller)) .map_err(|with_post| with_post.error) } fn bond( - who: &Self::AccountId, + stash: Self::AccountId, + controller: Self::AccountId, value: Self::Balance, - payee: &Self::AccountId, + payee: Self::AccountId, ) -> DispatchResult { Self::bond( - RawOrigin::Signed(who.clone()).into(), - T::Lookup::unlookup(who.clone()), + RawOrigin::Signed(stash).into(), + T::Lookup::unlookup(controller), value, - RewardDestination::Account(payee.clone()), + RewardDestination::Account(payee), ) } - fn nominate(who: &Self::AccountId, targets: Vec) -> DispatchResult { - let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + fn nominate(controller: Self::AccountId, targets: Vec) -> DispatchResult { let targets = targets.into_iter().map(T::Lookup::unlookup).collect::>(); - Self::nominate(RawOrigin::Signed(ctrl).into(), targets) + Self::nominate(RawOrigin::Signed(controller).into(), targets) } #[cfg(feature = "runtime-benchmarks")] fn nominations(who: Self::AccountId) -> Option> { Nominators::::get(who).map(|n| n.targets.into_inner()) } - - #[cfg(feature = "runtime-benchmarks")] - fn add_era_stakers( - current_era: &EraIndex, - stash: &T::AccountId, - exposures: Vec<(Self::AccountId, Self::Balance)>, - ) { - let others = exposures - .iter() - .map(|(who, value)| IndividualExposure { who: who.clone(), value: value.clone() }) - 
.collect::>(); - let exposure = Exposure { total: Default::default(), own: Default::default(), others }; - Self::add_era_stakers(current_era.clone(), stash.clone(), exposure) - } - - #[cfg(feature = "runtime-benchmarks")] - fn set_current_era(era: EraIndex) { - CurrentEra::::put(era); - } -} - -#[cfg(any(test, feature = "try-runtime"))] -impl Pallet { - pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), &'static str> { - ensure!( - T::VoterList::iter() - .all(|x| >::contains_key(&x) || >::contains_key(&x)), - "VoterList contains non-nominators" - ); - T::VoterList::try_state()?; - Self::check_nominators()?; - Self::check_exposures()?; - Self::check_ledgers()?; - Self::check_count() - } - - fn check_count() -> Result<(), &'static str> { - ensure!( - ::VoterList::count() == - Nominators::::count() + Validators::::count(), - "wrong external count" - ); - - ensure!( - ValidatorCount::::get() <= - ::MaxWinners::get(), - "validator count exceeded election max winners" - ); - Ok(()) - } - - fn check_ledgers() -> Result<(), &'static str> { - Bonded::::iter() - .map(|(_, ctrl)| Self::ensure_ledger_consistent(ctrl)) - .collect::>() - } - - fn check_exposures() -> Result<(), &'static str> { - // a check per validator to ensure the exposure struct is always sane. - let era = Self::active_era().unwrap().index; - ErasStakers::::iter_prefix_values(era) - .map(|expo| { - ensure!( - expo.total == - expo.own + - expo.others - .iter() - .map(|e| e.value) - .fold(Zero::zero(), |acc, x| acc + x), - "wrong total exposure.", - ); - Ok(()) - }) - .collect::>() - } - - fn check_nominators() -> Result<(), &'static str> { - // a check per nominator to ensure their entire stake is correctly distributed. Will only - // kick-in if the nomination was submitted before the current era. 
- let era = Self::active_era().unwrap().index; - >::iter() - .filter_map( - |(nominator, nomination)| { - if nomination.submitted_in > era { - Some(nominator) - } else { - None - } - }, - ) - .map(|nominator| { - // must be bonded. - Self::ensure_is_stash(&nominator)?; - let mut sum = BalanceOf::::zero(); - T::SessionInterface::validators() - .iter() - .map(|v| Self::eras_stakers(era, v)) - .map(|e| { - let individual = - e.others.iter().filter(|e| e.who == nominator).collect::>(); - let len = individual.len(); - match len { - 0 => { /* not supporting this validator at all. */ }, - 1 => sum += individual[0].value, - _ => return Err("nominator cannot back a validator more than once."), - }; - Ok(()) - }) - .collect::>() - }) - .collect::>() - } - - fn ensure_is_stash(who: &T::AccountId) -> Result<(), &'static str> { - ensure!(Self::bonded(who).is_some(), "Not a stash."); - Ok(()) - } - - fn ensure_ledger_consistent(ctrl: T::AccountId) -> Result<(), &'static str> { - // ensures ledger.total == ledger.active + sum(ledger.unlocking). - let ledger = Self::ledger(ctrl.clone()).ok_or("Not a controller.")?; - let real_total: BalanceOf = - ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); - ensure!(real_total == ledger.total, "ledger.total corrupt"); - - if !(ledger.active >= T::Currency::minimum_balance() || ledger.active.is_zero()) { - log!(warn, "ledger.active less than ED: {:?}, {:?}", ctrl, ledger) - } - - Ok(()) - } } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 8fddba2150370..4ce96ab68b11a 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -17,36 +17,32 @@ //! Staking FRAME Pallet. 
-use frame_election_provider_support::{ - ElectionProvider, ElectionProviderBase, SortedListProvider, VoteWeight, -}; +use frame_election_provider_support::{SortedListProvider, VoteWeight}; use frame_support::{ dispatch::Codec, pallet_prelude::*, traits::{ - Currency, CurrencyToVote, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, - EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, TryCollect, - UnixTime, + Currency, CurrencyToVote, Defensive, DefensiveSaturating, EnsureOrigin, + EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, UnixTime, }, weights::Weight, - BoundedVec, }; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::{ traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, - ArithmeticError, Perbill, Percent, + Perbill, Percent, }; use sp_staking::{EraIndex, SessionIndex}; -use sp_std::prelude::*; +use sp_std::{cmp::max, prelude::*}; mod impls; pub use impls::*; use crate::{ - slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, - Releases, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, + slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, EraRewardPoints, Exposure, + Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, Releases, + RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, }; @@ -62,6 +58,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(crate) trait Store)] + #[pallet::without_storage_info] pub struct Pallet(_); /// Possible operations on the configuration values of this pallet. @@ -109,7 +106,7 @@ pub mod pallet { type CurrencyToVote: CurrencyToVote>; /// Something that provides the election functionality. 
- type ElectionProvider: ElectionProvider< + type ElectionProvider: frame_election_provider_support::ElectionProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, // we only accept an election provider that has staking as data provider. @@ -117,7 +114,7 @@ pub mod pallet { >; /// Something that provides the election functionality at genesis. - type GenesisElectionProvider: ElectionProvider< + type GenesisElectionProvider: frame_election_provider_support::ElectionProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Pallet, @@ -127,35 +124,12 @@ pub mod pallet { #[pallet::constant] type MaxNominations: Get; - /// Number of eras to keep in history. - /// - /// Following information is kept for eras in `[current_era - - /// HistoryDepth, current_era]`: `ErasStakers`, `ErasStakersClipped`, - /// `ErasValidatorPrefs`, `ErasValidatorReward`, `ErasRewardPoints`, - /// `ErasTotalStake`, `ErasStartSessionIndex`, - /// `StakingLedger.claimed_rewards`. - /// - /// Must be more than the number of eras delayed by session. - /// I.e. active era must always be in history. I.e. `active_era > - /// current_era - history_depth` must be guaranteed. - /// - /// If migrating an existing pallet from storage value to config value, - /// this should be set to same value or greater as in storage. - /// - /// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec` - /// item `StakingLedger.claimed_rewards`. Setting this value lower than - /// the existing value can lead to inconsistencies in the - /// `StakingLedger` and will need to be handled properly in a migration. - /// The test `reducing_history_depth_abrupt` shows this effect. - #[pallet::constant] - type HistoryDepth: Get; - /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). type RewardRemainder: OnUnbalanced>; /// The overarching event type. 
- type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Handler for the unbalanced reduction when slashing a staker. type Slash: OnUnbalanced>; @@ -181,7 +155,7 @@ pub mod pallet { type SlashDeferDuration: Get; /// The origin which can cancel a deferred slash. Root can always do this. - type SlashCancelOrigin: EnsureOrigin; + type SlashCancelOrigin: EnsureOrigin; /// Interface for interacting with a session pallet. type SessionInterface: SessionInterface; @@ -210,46 +184,10 @@ pub mod pallet { /// /// The changes to nominators are reported to this. Moreover, each validator's self-vote is /// also reported as one independent vote. - /// - /// To keep the load off the chain as much as possible, changes made to the staked amount - /// via rewards and slashes are not reported and thus need to be manually fixed by the - /// staker. In case of `bags-list`, this always means using `rebag` and `putInFrontOf`. - /// - /// Invariant: what comes out of this list will always be a nominator. type VoterList: SortedListProvider; - /// WIP: This is a noop as of now, the actual business logic that's described below is going - /// to be introduced in a follow-up PR. - /// - /// Something that provides a best-effort sorted list of targets aka electable validators, - /// used for NPoS election. - /// - /// The changes to the approval stake of each validator are reported to this. This means any - /// change to: - /// 1. The stake of any validator or nominator. - /// 2. The targets of any nominator - /// 3. The role of any staker (e.g. validator -> chilled, nominator -> validator, etc) - /// - /// Unlike `VoterList`, the values in this list are always kept up to date with reward and - /// slash as well, and thus represent the accurate approval stake of all account being - /// nominated by nominators. 
- /// - /// Note that while at the time of nomination, all targets are checked to be real - /// validators, they can chill at any point, and their approval stakes will still be - /// recorded. This implies that what comes out of iterating this list MIGHT NOT BE AN ACTIVE - /// VALIDATOR. - type TargetList: SortedListProvider>; - - /// The maximum number of `unlocking` chunks a [`StakingLedger`] can - /// have. Effectively determines how many unique eras a staker may be - /// unbonding in. - /// - /// Note: `MaxUnlockingChunks` is used as the upper bound for the - /// `BoundedVec` item `StakingLedger.unlocking`. Setting this value - /// lower than the existing value can lead to inconsistencies in the - /// `StakingLedger` and will need to be handled properly in a runtime - /// migration. The test `reducing_max_unlocking_chunks_abrupt` shows - /// this effect. + /// The maximum number of `unlocking` chunks a [`StakingLedger`] can have. Effectively + /// determines how many unique eras a staker may be unbonding in. #[pallet::constant] type MaxUnlockingChunks: Get; @@ -264,7 +202,23 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The ideal number of active validators. + #[pallet::type_value] + pub(crate) fn HistoryDepthOnEmpty() -> u32 { + 84u32 + } + + /// Number of eras to keep in history. + /// + /// Information is kept for eras in `[current_era - history_depth; current_era]`. + /// + /// Must be more than the number of eras delayed by session otherwise. I.e. active era must + /// always be in history. I.e. `active_era > current_era - history_depth` must be + /// guaranteed. + #[pallet::storage] + #[pallet::getter(fn history_depth)] + pub(crate) type HistoryDepth = StorageValue<_, u32, ValueQuery, HistoryDepthOnEmpty>; + + /// The ideal number of staking participants. 
#[pallet::storage] #[pallet::getter(fn validator_count)] pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; @@ -279,7 +233,6 @@ pub mod pallet { /// invulnerables) and restricted to testnets. #[pallet::storage] #[pallet::getter(fn invulnerables)] - #[pallet::unbounded] pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; /// Map from all locked "stash" accounts to the controller account. @@ -383,7 +336,6 @@ pub mod pallet { /// If stakers hasn't been set or has been removed then empty exposure is returned. #[pallet::storage] #[pallet::getter(fn eras_stakers)] - #[pallet::unbounded] pub type ErasStakers = StorageDoubleMap< _, Twox64Concat, @@ -406,7 +358,6 @@ pub mod pallet { /// Is it removed after `HISTORY_DEPTH` eras. /// If stakers hasn't been set or has been removed then empty exposure is returned. #[pallet::storage] - #[pallet::unbounded] #[pallet::getter(fn eras_stakers_clipped)] pub type ErasStakersClipped = StorageDoubleMap< _, @@ -446,7 +397,6 @@ pub mod pallet { /// Rewards for the last `HISTORY_DEPTH` eras. /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] - #[pallet::unbounded] #[pallet::getter(fn eras_reward_points)] pub type ErasRewardPoints = StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>; @@ -478,7 +428,6 @@ pub mod pallet { /// All unapplied slashes that are queued for later. #[pallet::storage] - #[pallet::unbounded] pub type UnappliedSlashes = StorageMap< _, Twox64Concat, @@ -492,7 +441,6 @@ pub mod pallet { /// Must contains information for eras for the range: /// `[active_era - bounding_duration; active_era]` #[pallet::storage] - #[pallet::unbounded] pub(crate) type BondedEras = StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; @@ -515,8 +463,6 @@ pub mod pallet { /// Slashing spans for stash accounts. 
#[pallet::storage] - #[pallet::getter(fn slashing_spans)] - #[pallet::unbounded] pub(crate) type SlashingSpans = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; @@ -548,7 +494,6 @@ pub mod pallet { /// whether a given validator has previously offended using binary search. It gets cleared when /// the era ends. #[pallet::storage] - #[pallet::unbounded] #[pallet::getter(fn offending_validators)] pub type OffendingValidators = StorageValue<_, Vec<(u32, bool)>, ValueQuery>; @@ -567,6 +512,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { + pub history_depth: u32, pub validator_count: u32, pub minimum_validator_count: u32, pub invulnerables: Vec, @@ -585,6 +531,7 @@ pub mod pallet { impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { + history_depth: 84u32, validator_count: Default::default(), minimum_validator_count: Default::default(), invulnerables: Default::default(), @@ -603,6 +550,7 @@ pub mod pallet { #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { + HistoryDepth::::put(self.history_depth); ValidatorCount::::put(self.validator_count); MinimumValidatorCount::::put(self.minimum_validator_count); Invulnerables::::put(&self.invulnerables); @@ -632,26 +580,22 @@ pub mod pallet { "Stash does not have enough balance to bond." 
); frame_support::assert_ok!(>::bond( - T::RuntimeOrigin::from(Some(stash.clone()).into()), + T::Origin::from(Some(stash.clone()).into()), T::Lookup::unlookup(controller.clone()), balance, RewardDestination::Staked, )); frame_support::assert_ok!(match status { crate::StakerStatus::Validator => >::validate( - T::RuntimeOrigin::from(Some(controller.clone()).into()), + T::Origin::from(Some(controller.clone()).into()), Default::default(), ), crate::StakerStatus::Nominator(votes) => >::nominate( - T::RuntimeOrigin::from(Some(controller.clone()).into()), + T::Origin::from(Some(controller.clone()).into()), votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), ), _ => Ok(()), }); - assert!( - ValidatorCount::::get() <= - ::MaxWinners::get() - ); } // all voters are reported to the `VoterList`. @@ -668,36 +612,39 @@ pub mod pallet { pub enum Event { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. - EraPaid { era_index: EraIndex, validator_payout: BalanceOf, remainder: BalanceOf }, - /// The nominator has been rewarded by this amount. - Rewarded { stash: T::AccountId, amount: BalanceOf }, - /// One staker (and potentially its nominators) has been slashed by the given amount. - Slashed { staker: T::AccountId, amount: BalanceOf }, + /// \[era_index, validator_payout, remainder\] + EraPaid(EraIndex, BalanceOf, BalanceOf), + /// The nominator has been rewarded by this amount. \[stash, amount\] + Rewarded(T::AccountId, BalanceOf), + /// One validator (and its nominators) has been slashed by the given amount. + /// \[validator, amount\] + Slashed(T::AccountId, BalanceOf), /// An old slashing report from a prior era was discarded because it could - /// not be processed. - OldSlashingReportDiscarded { session_index: SessionIndex }, + /// not be processed. \[session_index\] + OldSlashingReportDiscarded(SessionIndex), /// A new set of stakers was elected. 
StakersElected, /// An account has bonded this amount. \[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, /// it will not be emitted for staking rewards when they are added to stake. - Bonded { stash: T::AccountId, amount: BalanceOf }, - /// An account has unbonded this amount. - Unbonded { stash: T::AccountId, amount: BalanceOf }, + Bonded(T::AccountId, BalanceOf), + /// An account has unbonded this amount. \[stash, amount\] + Unbonded(T::AccountId, BalanceOf), /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` - /// from the unlocking queue. - Withdrawn { stash: T::AccountId, amount: BalanceOf }, - /// A nominator has been kicked from a validator. - Kicked { nominator: T::AccountId, stash: T::AccountId }, + /// from the unlocking queue. \[stash, amount\] + Withdrawn(T::AccountId, BalanceOf), + /// A nominator has been kicked from a validator. \[nominator, stash\] + Kicked(T::AccountId, T::AccountId), /// The election failed. No new era is planned. StakingElectionFailed, /// An account has stopped participating as either a validator or nominator. - Chilled { stash: T::AccountId }, - /// The stakers' rewards are getting paid. - PayoutStarted { era_index: EraIndex, validator_stash: T::AccountId }, + /// \[stash\] + Chilled(T::AccountId), + /// The stakers' rewards are getting paid. \[era_index, validator_stash\] + PayoutStarted(EraIndex, T::AccountId), /// A validator has set their preferences. - ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs }, + ValidatorPrefsSet(T::AccountId, ValidatorPrefs), } #[pallet::error] @@ -749,13 +696,11 @@ pub mod pallet { /// There are too many nominators in the system. Governance needs to adjust the staking /// settings to keep things safe for the runtime. TooManyNominators, - /// There are too many validator candidates in the system. 
Governance needs to adjust the - /// staking settings to keep things safe for the runtime. + /// There are too many validators in the system. Governance needs to adjust the staking + /// settings to keep things safe for the runtime. TooManyValidators, /// Commission is too low. Must be at least `MinCommission`. CommissionTooLow, - /// Some bound is not met. - BoundNotMet, } #[pallet::hooks] @@ -788,12 +733,6 @@ pub mod pallet { // and that MaxNominations is always greater than 1, since we count on this. assert!(!T::MaxNominations::get().is_zero()); - // ensure election results are always bounded with the same value - assert!( - ::MaxWinners::get() == - ::MaxWinners::get() - ); - sp_std::if_std! { sp_io::TestExternalities::new_empty().execute_with(|| assert!( @@ -805,11 +744,6 @@ pub mod pallet { ); } } - - #[cfg(feature = "try-runtime")] - fn try_state(n: BlockNumberFor) -> Result<(), &'static str> { - Self::do_try_state(n) - } } #[pallet::call] @@ -834,7 +768,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::bond())] pub fn bond( origin: OriginFor, - controller: AccountIdLookupOf, + controller: ::Source, #[pallet::compact] value: BalanceOf, payee: RewardDestination, ) -> DispatchResult { @@ -863,23 +797,18 @@ pub mod pallet { >::insert(&stash, payee); let current_era = CurrentEra::::get().unwrap_or(0); - let history_depth = T::HistoryDepth::get(); + let history_depth = Self::history_depth(); let last_reward_era = current_era.saturating_sub(history_depth); let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); - Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); + Self::deposit_event(Event::::Bonded(stash.clone(), value)); let item = StakingLedger { stash, total: value, active: value, unlocking: Default::default(), - claimed_rewards: (last_reward_era..current_era) - .try_collect() - // Since last_reward_era is calculated as `current_era - - // HistoryDepth`, following bound is always expected to 
be - // satisfied. - .defensive_map_err(|_| Error::::BoundNotMet)?, + claimed_rewards: (last_reward_era..current_era).collect(), }; Self::update_ledger(&controller, &item); Ok(()) @@ -927,9 +856,10 @@ pub mod pallet { if T::VoterList::contains(&stash) { let _ = T::VoterList::on_update(&stash, Self::weight_of(&ledger.stash)).defensive(); + debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); } - Self::deposit_event(Event::::Bonded { stash, amount: extra }); + Self::deposit_event(Event::::Bonded(stash, extra)); } Ok(()) } @@ -961,7 +891,7 @@ pub mod pallet { let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!( - ledger.unlocking.len() < T::MaxUnlockingChunks::get() as usize, + ledger.unlocking.len() < MaxUnlockingChunks::get() as usize, Error::::NoMoreChunks, ); @@ -1012,7 +942,7 @@ pub mod pallet { .defensive(); } - Self::deposit_event(Event::::Unbonded { stash: ledger.stash, amount: value }); + Self::deposit_event(Event::::Unbonded(ledger.stash, value)); } Ok(()) } @@ -1068,7 +998,7 @@ pub mod pallet { if ledger.total < old_total { // Already checked that this won't overflow by entry condition. 
let value = old_total - ledger.total; - Self::deposit_event(Event::::Withdrawn { stash, amount: value }); + Self::deposit_event(Event::::Withdrawn(stash, value)); } Ok(post_info_weight.into()) @@ -1106,7 +1036,7 @@ pub mod pallet { Self::do_remove_nominator(stash); Self::do_add_validator(stash, prefs.clone()); - Self::deposit_event(Event::::ValidatorPrefsSet { stash: ledger.stash, prefs }); + Self::deposit_event(Event::::ValidatorPrefsSet(ledger.stash, prefs)); Ok(()) } @@ -1125,7 +1055,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))] pub fn nominate( origin: OriginFor, - targets: Vec>, + targets: Vec<::Source>, ) -> DispatchResult { let controller = ensure_signed(origin)?; @@ -1245,7 +1175,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_controller())] pub fn set_controller( origin: OriginFor, - controller: AccountIdLookupOf, + controller: ::Source, ) -> DispatchResult { let stash = ensure_signed(origin)?; let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; @@ -1276,18 +1206,11 @@ pub mod pallet { #[pallet::compact] new: u32, ) -> DispatchResult { ensure_root(origin)?; - // ensure new validator count does not exceed maximum winners - // support by election provider. - ensure!( - new <= ::MaxWinners::get(), - Error::::TooManyValidators - ); ValidatorCount::::put(new); Ok(()) } - /// Increments the ideal number of validators upto maximum of - /// `ElectionProviderBase::MaxWinners`. + /// Increments the ideal number of validators. /// /// The dispatch origin must be Root. 
/// @@ -1300,19 +1223,11 @@ pub mod pallet { #[pallet::compact] additional: u32, ) -> DispatchResult { ensure_root(origin)?; - let old = ValidatorCount::::get(); - let new = old.checked_add(additional).ok_or(ArithmeticError::Overflow)?; - ensure!( - new <= ::MaxWinners::get(), - Error::::TooManyValidators - ); - - ValidatorCount::::put(new); + ValidatorCount::::mutate(|n| *n += additional); Ok(()) } - /// Scale up the ideal number of validators by a factor upto maximum of - /// `ElectionProviderBase::MaxWinners`. + /// Scale up the ideal number of validators by a factor. /// /// The dispatch origin must be Root. /// @@ -1322,15 +1237,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_validator_count())] pub fn scale_validator_count(origin: OriginFor, factor: Percent) -> DispatchResult { ensure_root(origin)?; - let old = ValidatorCount::::get(); - let new = old.checked_add(factor.mul_floor(old)).ok_or(ArithmeticError::Overflow)?; - - ensure!( - new <= ::MaxWinners::get(), - Error::::TooManyValidators - ); - - ValidatorCount::::put(new); + ValidatorCount::::mutate(|n| *n += factor * *n); Ok(()) } @@ -1498,7 +1405,7 @@ pub mod pallet { /// - Bounded by `MaxUnlockingChunks`. /// - Storage changes: Can't increase storage, only decrease it. /// # - #[pallet::weight(T::WeightInfo::rebond(T::MaxUnlockingChunks::get() as u32))] + #[pallet::weight(T::WeightInfo::rebond(MaxUnlockingChunks::get() as u32))] pub fn rebond( origin: OriginFor, #[pallet::compact] value: BalanceOf, @@ -1512,10 +1419,7 @@ pub mod pallet { // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); - Self::deposit_event(Event::::Bonded { - stash: ledger.stash.clone(), - amount: rebonded_value, - }); + Self::deposit_event(Event::::Bonded(ledger.stash.clone(), rebonded_value)); // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
Self::update_ledger(&controller, &ledger); @@ -1530,6 +1434,48 @@ pub mod pallet { Ok(Some(T::WeightInfo::rebond(removed_chunks)).into()) } + /// Set `HistoryDepth` value. This function will delete any history information + /// when `HistoryDepth` is reduced. + /// + /// Parameters: + /// - `new_history_depth`: The new history depth you would like to set. + /// - `era_items_deleted`: The number of items that will be deleted by this dispatch. This + /// should report all the storage items that will be deleted by clearing old era history. + /// Needed to report an accurate weight for the dispatch. Trusted by `Root` to report an + /// accurate number. + /// + /// Origin must be root. + /// + /// # + /// - E: Number of history depths removed, i.e. 10 -> 7 = 3 + /// - Weight: O(E) + /// - DB Weight: + /// - Reads: Current Era, History Depth + /// - Writes: History Depth + /// - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs + /// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, + /// ErasStartSessionIndex + /// # + #[pallet::weight(T::WeightInfo::set_history_depth(*_era_items_deleted))] + pub fn set_history_depth( + origin: OriginFor, + #[pallet::compact] new_history_depth: EraIndex, + #[pallet::compact] _era_items_deleted: u32, + ) -> DispatchResult { + ensure_root(origin)?; + if let Some(current_era) = Self::current_era() { + HistoryDepth::::mutate(|history_depth| { + let last_kept = current_era.saturating_sub(*history_depth); + let new_last_kept = current_era.saturating_sub(new_history_depth); + for era_index in last_kept..new_last_kept { + Self::clear_era_information(era_index); + } + *history_depth = new_history_depth + }) + } + Ok(()) + } + /// Remove all data structures concerning a staker/stash once it is at a state where it can /// be considered `dust` in the staking system. 
The requirements are: /// @@ -1575,7 +1521,10 @@ pub mod pallet { /// Note: Making this call only makes sense if you first set the validator preferences to /// block any further nominations. #[pallet::weight(T::WeightInfo::kick(who.len() as u32))] - pub fn kick(origin: OriginFor, who: Vec>) -> DispatchResult { + pub fn kick( + origin: OriginFor, + who: Vec<::Source>, + ) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1590,10 +1539,10 @@ pub mod pallet { if let Some(ref mut nom) = maybe_nom { if let Some(pos) = nom.targets.iter().position(|v| v == stash) { nom.targets.swap_remove(pos); - Self::deposit_event(Event::::Kicked { - nominator: nom_stash.clone(), - stash: stash.clone(), - }); + Self::deposit_event(Event::::Kicked( + nom_stash.clone(), + stash.clone(), + )); } } }); @@ -1615,16 +1564,16 @@ pub mod pallet { /// * `min_commission`: The minimum amount of commission that each validators must maintain. /// This is checked only upon calling `validate`. Existing validators are not affected. /// - /// RuntimeOrigin must be Root to call this function. + /// Origin must be Root to call this function. /// /// NOTE: Existing nominators and validators will not be affected by this update. /// to kick people under the new limits, `chill_other` should be called. // We assume the worst case for this call is either: all items are set or all items are // removed. 
- #[pallet::weight( - T::WeightInfo::set_staking_configs_all_set() - .max(T::WeightInfo::set_staking_configs_all_remove()) - )] + #[pallet::weight(max( + T::WeightInfo::set_staking_configs_all_set(), + T::WeightInfo::set_staking_configs_all_remove() + ))] pub fn set_staking_configs( origin: OriginFor, min_nominator_bond: ConfigOp>, diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index a1900136d64fd..7372c4390f816 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -53,7 +53,7 @@ use crate::{ BalanceOf, Config, Error, Exposure, NegativeImbalanceOf, Pallet, Perbill, SessionInterface, Store, UnappliedSlash, }; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use frame_support::{ ensure, traits::{Currency, Defensive, Get, Imbalance, OnUnbalanced}, @@ -182,7 +182,7 @@ impl SlashingSpans { } /// A slashing-span record for a particular stash. -#[derive(Encode, Decode, Default, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Default, TypeInfo)] pub(crate) struct SpanRecord { slashed: Balance, paid_out: Balance, @@ -626,10 +626,7 @@ pub fn do_slash( >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event(super::Event::::Slashed { - staker: stash.clone(), - amount: value, - }); + >::deposit_event(super::Event::::Slashed(stash.clone(), value)); } } diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 0e0ac76523471..ba67292ddc434 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -78,7 +78,8 @@ pub fn create_stash_controller( ) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); let controller = create_funded_user::("controller", n, balance_factor); - let controller_lookup = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); let amount = 
T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), @@ -97,7 +98,8 @@ pub fn create_stash_controller_with_balance( ) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user_with_balance::("stash", n, balance); let controller = create_funded_user_with_balance::("controller", n, balance); - let controller_lookup = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), @@ -118,7 +120,8 @@ pub fn create_stash_and_dead_controller( let stash = create_funded_user::("stash", n, balance_factor); // controller has no funds let controller = create_funded_user::("controller", n, 0); - let controller_lookup = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), @@ -133,7 +136,7 @@ pub fn create_stash_and_dead_controller( pub fn create_validators( max: u32, balance_factor: u32, -) -> Result>, &'static str> { +) -> Result::Source>, &'static str> { create_validators_with_seed::(max, balance_factor, 0) } @@ -142,15 +145,15 @@ pub fn create_validators_with_seed( max: u32, balance_factor: u32, seed: u32, -) -> Result>, &'static str> { - let mut validators: Vec> = Vec::with_capacity(max as usize); +) -> Result::Source>, &'static str> { + let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); for i in 0..max { let (stash, controller) = create_stash_controller::(i + seed, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; - let stash_lookup = T::Lookup::unlookup(stash); + 
let stash_lookup: ::Source = T::Lookup::unlookup(stash); validators.push(stash_lookup); } Ok(validators) @@ -177,10 +180,11 @@ pub fn create_validators_with_nominators_for_era( edge_per_nominator: usize, randomize_stake: bool, to_nominate: Option, -) -> Result>, &'static str> { +) -> Result::Source>, &'static str> { clear_validators_and_nominators::(); - let mut validators_stash: Vec> = Vec::with_capacity(validators as usize); + let mut validators_stash: Vec<::Source> = + Vec::with_capacity(validators as usize); let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); // Create validators @@ -191,7 +195,8 @@ pub fn create_validators_with_nominators_for_era( let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; - let stash_lookup = T::Lookup::unlookup(v_stash.clone()); + let stash_lookup: ::Source = + T::Lookup::unlookup(v_stash.clone()); validators_stash.push(stash_lookup.clone()); } @@ -206,7 +211,7 @@ pub fn create_validators_with_nominators_for_era( // Have them randomly validate let mut available_validators = validator_chosen.clone(); - let mut selected_validators: Vec> = + let mut selected_validators: Vec<::Source> = Vec::with_capacity(edge_per_nominator); for _ in 0..validators.min(edge_per_nominator as u32) { diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 6609b9087637d..d14d8c4a75f2e 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -17,20 +17,21 @@ //! Tests for the module. 
-use super::{ConfigOp, Event, *}; +use super::{ConfigOp, Event, MaxUnlockingChunks, *}; use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support}; use frame_support::{ assert_noop, assert_ok, assert_storage_noop, bounded_vec, - dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}, + dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{Currency, Get, ReservableCurrency}, + weights::{extract_actual_weight, GetDispatchInfo}, }; use mock::*; use pallet_balances::Error as BalancesError; use sp_runtime::{ assert_eq_error_rate, traits::{BadOrigin, Dispatchable}, - Perbill, Percent, Rounding, + Perbill, Percent, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -44,7 +45,7 @@ fn set_staking_configs_works() { ExtBuilder::default().build_and_execute(|| { // setting works assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Set(1_500), ConfigOp::Set(2_000), ConfigOp::Set(10), @@ -61,7 +62,7 @@ fn set_staking_configs_works() { // noop does nothing assert_storage_noop!(assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Noop, @@ -72,7 +73,7 @@ fn set_staking_configs_works() { // removing works assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Remove, ConfigOp::Remove, ConfigOp::Remove, @@ -98,22 +99,22 @@ fn force_unstake_works() { add_slash(&11); // Cant transfer assert_noop!( - Balances::transfer(RuntimeOrigin::signed(11), 1, 10), + Balances::transfer(Origin::signed(11), 1, 10), BalancesError::::LiquidityRestrictions ); // Force unstake requires root. 
- assert_noop!(Staking::force_unstake(RuntimeOrigin::signed(11), 11, 2), BadOrigin); + assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 2), BadOrigin); // Force unstake needs correct number of slashing spans (for weight calculation) assert_noop!( - Staking::force_unstake(RuntimeOrigin::root(), 11, 0), + Staking::force_unstake(Origin::root(), 11, 0), Error::::IncorrectSlashingSpans ); // We now force them to unstake - assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 11, 2)); + assert_ok!(Staking::force_unstake(Origin::root(), 11, 2)); // No longer bonded. assert_eq!(Staking::bonded(&11), None); // Transfer works. - assert_ok!(Balances::transfer(RuntimeOrigin::signed(11), 1, 10)); + assert_ok!(Balances::transfer(Origin::signed(11), 1, 10)); }); } @@ -148,14 +149,14 @@ fn basic_setup_works() { // Account 10 controls the stash from account 11, which is 100 * balance_factor units assert_eq!( - Staking::ledger(&10).unwrap(), - StakingLedger { + Staking::ledger(&10), + Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], - } + claimed_rewards: vec![] + }) ); // Account 20 controls the stash from account 21, which is 200 * balance_factor units assert_eq!( @@ -165,7 +166,7 @@ fn basic_setup_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] }) ); // Account 1 does not control any stash @@ -188,7 +189,7 @@ fn basic_setup_works() { total: 500, active: 500, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] }) ); assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); @@ -235,19 +236,19 @@ fn change_controller_works() { assert_eq!(Staking::bonded(&11), Some(10)); // 10 can control 11 who is initially a validator. 
- assert_ok!(Staking::chill(RuntimeOrigin::signed(10))); + assert_ok!(Staking::chill(Origin::signed(10))); // change controller - assert_ok!(Staking::set_controller(RuntimeOrigin::signed(11), 5)); + assert_ok!(Staking::set_controller(Origin::signed(11), 5)); assert_eq!(Staking::bonded(&11), Some(5)); mock::start_active_era(1); // 10 is no longer in control. assert_noop!( - Staking::validate(RuntimeOrigin::signed(10), ValidatorPrefs::default()), + Staking::validate(Origin::signed(10), ValidatorPrefs::default()), Error::::NotController, ); - assert_ok!(Staking::validate(RuntimeOrigin::signed(5), ValidatorPrefs::default())); + assert_ok!(Staking::validate(Origin::signed(5), ValidatorPrefs::default())); }) } @@ -300,14 +301,13 @@ fn rewards_should_work() { start_session(3); assert_eq!(active_era(), 1); - assert_eq!(mock::RewardRemainderUnbalanced::get(), maximum_payout - total_payout_0,); + assert_eq!( + mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), + maximum_payout - total_payout_0, + ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid { - era_index: 0, - validator_payout: total_payout_0, - remainder: maximum_payout - total_payout_0 - } + Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) ); mock::make_all_reward_payment(0); @@ -340,16 +340,12 @@ fn rewards_should_work() { mock::start_active_era(2); assert_eq!( - mock::RewardRemainderUnbalanced::get(), + mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), maximum_payout * 2 - total_payout_0 - total_payout_1, ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid { - era_index: 1, - validator_payout: total_payout_1, - remainder: maximum_payout - total_payout_1 - } + Event::EraPaid(1, total_payout_1, maximum_payout - total_payout_1) ); mock::make_all_reward_payment(1); @@ -390,13 +386,9 @@ fn staking_should_work() { // --- Block 2: start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. 
- assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 1500, RewardDestination::Controller)); - assert_ok!(Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default())); - assert_ok!(Session::set_keys( - RuntimeOrigin::signed(4), - SessionKeys { other: 4.into() }, - vec![] - )); + assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + assert_ok!(Session::set_keys(Origin::signed(4), SessionKeys { other: 4.into() }, vec![])); // No effects will be seen so far. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -420,7 +412,7 @@ fn staking_should_work() { assert_eq_uvec!(validator_controllers(), vec![20, 4]); // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 // 4 will chill - Staking::chill(RuntimeOrigin::signed(4)).unwrap(); + Staking::chill(Origin::signed(4)).unwrap(); // --- Block 7: nothing. 4 is still there. start_session(7); @@ -441,7 +433,7 @@ fn staking_should_work() { total: 1500, active: 1500, unlocking: Default::default(), - claimed_rewards: bounded_vec![0], + claimed_rewards: vec![0], }) ); // e.g. it cannot reserve more than 500 that it has free from the total 2000 @@ -459,20 +451,20 @@ fn blocking_and_kicking_works() { .build_and_execute(|| { // block validator 10/11 assert_ok!(Staking::validate( - RuntimeOrigin::signed(10), + Origin::signed(10), ValidatorPrefs { blocked: true, ..Default::default() } )); // attempt to nominate from 100/101... 
- assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![11])); + assert_ok!(Staking::nominate(Origin::signed(100), vec![11])); // should have worked since we're already nominated them assert_eq!(Nominators::::get(&101).unwrap().targets, vec![11]); // kick the nominator - assert_ok!(Staking::kick(RuntimeOrigin::signed(10), vec![101])); + assert_ok!(Staking::kick(Origin::signed(10), vec![101])); // should have been kicked now assert!(Nominators::::get(&101).unwrap().targets.is_empty()); // attempt to nominate from 100/101... assert_noop!( - Staking::nominate(RuntimeOrigin::signed(100), vec![11]), + Staking::nominate(Origin::signed(100), vec![11]), Error::::BadTarget ); }); @@ -518,7 +510,7 @@ fn no_candidate_emergency_condition() { ::MinimumValidatorCount::put(10); // try to chill - let res = Staking::chill(RuntimeOrigin::signed(10)); + let res = Staking::chill(Origin::signed(10)); assert_ok!(res); let current_era = CurrentEra::::get(); @@ -554,26 +546,14 @@ fn nominating_and_rewards_should_work() { assert_eq_uvec!(validator_controllers(), vec![40, 20]); // re-validate with 11 and 31. - assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); - assert_ok!(Staking::validate(RuntimeOrigin::signed(30), Default::default())); + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + assert_ok!(Staking::validate(Origin::signed(30), Default::default())); // Set payee to controller. 
- assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(10), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(20), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(30), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(40), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(20), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(30), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(40), RewardDestination::Controller)); // give the man some money let initial_balance = 1000; @@ -582,21 +562,11 @@ fn nominating_and_rewards_should_work() { } // bond two account pairs and state interest in nomination. - assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), - 2, - 1000, - RewardDestination::Controller - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 21, 31])); + assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(3), - 4, - 1000, - RewardDestination::Controller - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![11, 21, 41])); + assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); // the total reward for era 0 let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); @@ -748,28 +718,20 @@ fn double_staking_should_fail() { let arbitrary_value = 5; // 2 = controller, 1 stashed => ok assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, arbitrary_value, RewardDestination::default() )); // 4 = not used so far, 1 stashed => not 
allowed. assert_noop!( - Staking::bond( - RuntimeOrigin::signed(1), - 4, - arbitrary_value, - RewardDestination::default() - ), + Staking::bond(Origin::signed(1), 4, arbitrary_value, RewardDestination::default()), Error::::AlreadyBonded, ); // 1 = stashed => attempting to nominate should fail. - assert_noop!( - Staking::nominate(RuntimeOrigin::signed(1), vec![1]), - Error::::NotController - ); + assert_noop!(Staking::nominate(Origin::signed(1), vec![1]), Error::::NotController); // 2 = controller => nominating should work. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); }); } @@ -782,19 +744,14 @@ fn double_controlling_should_fail() { let arbitrary_value = 5; // 2 = controller, 1 stashed => ok assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), + Origin::signed(1), 2, arbitrary_value, RewardDestination::default(), )); // 2 = controller, 3 stashed (Note that 2 is reused.) => no-op assert_noop!( - Staking::bond( - RuntimeOrigin::signed(3), - 2, - arbitrary_value, - RewardDestination::default() - ), + Staking::bond(Origin::signed(3), 2, arbitrary_value, RewardDestination::default()), Error::::AlreadyPaired, ); }); @@ -960,14 +917,14 @@ fn cannot_transfer_staked_balance() { assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( - Balances::transfer(RuntimeOrigin::signed(11), 20, 1), + Balances::transfer(Origin::signed(11), 20, 1), BalancesError::::LiquidityRestrictions ); // Give account 11 extra free balance let _ = Balances::make_free_balance_be(&11, 10000); // Confirm that account 11 can now transfer some balance - assert_ok!(Balances::transfer(RuntimeOrigin::signed(11), 20, 1)); + assert_ok!(Balances::transfer(Origin::signed(11), 20, 1)); }); } @@ -985,10 +942,10 @@ fn cannot_transfer_staked_balance_2() { assert_eq!(Staking::eras_stakers(active_era(), 21).total, 1000); // Confirm account 21 can transfer at most 
1000 assert_noop!( - Balances::transfer(RuntimeOrigin::signed(21), 20, 1001), + Balances::transfer(Origin::signed(21), 20, 1001), BalancesError::::LiquidityRestrictions ); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(21), 20, 1000)); + assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); }); } @@ -1030,7 +987,7 @@ fn reward_destination_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); @@ -1053,7 +1010,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0], + claimed_rewards: vec![0], }) ); @@ -1081,7 +1038,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0, 1], + claimed_rewards: vec![0, 1], }) ); @@ -1110,7 +1067,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0, 1, 2], + claimed_rewards: vec![0, 1, 2], }) ); // Check that amount in staked account is NOT increased. @@ -1125,7 +1082,10 @@ fn validator_payment_prefs_work() { // This test will focus on validator payment. ExtBuilder::default().build_and_execute(|| { let commission = Perbill::from_percent(40); - >::insert(&11, ValidatorPrefs { commission, ..Default::default() }); + >::insert( + &11, + ValidatorPrefs { commission: commission.clone(), ..Default::default() }, + ); // Reward controller so staked ratio doesn't change. 
>::insert(&11, RewardDestination::Controller); @@ -1172,7 +1132,7 @@ fn bond_extra_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); @@ -1180,7 +1140,7 @@ fn bond_extra_works() { let _ = Balances::make_free_balance_be(&11, 1000000); // Call the bond_extra function from controller, add only 100 - assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 100)); + assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); // There should be 100 more `total` and `active` in the ledger assert_eq!( Staking::ledger(&10), @@ -1189,12 +1149,12 @@ fn bond_extra_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Call the bond_extra function with a large number, should handle it - assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), Balance::max_value())); + assert_ok!(Staking::bond_extra(Origin::signed(11), Balance::max_value())); // The full amount of the funds should now be in the total and active assert_eq!( Staking::ledger(&10), @@ -1203,7 +1163,7 @@ fn bond_extra_works() { total: 1000000, active: 1000000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); }); @@ -1219,7 +1179,7 @@ fn bond_extra_and_withdraw_unbonded_works() { // * Once the unbonding period is done, it can actually take the funds out of the stash. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. 
avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1241,7 +1201,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); assert_eq!( @@ -1250,7 +1210,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // deposit the extra 100 units - Staking::bond_extra(RuntimeOrigin::signed(11), 100).unwrap(); + Staking::bond_extra(Origin::signed(11), 100).unwrap(); assert_eq!( Staking::ledger(&10), @@ -1259,7 +1219,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Exposure is a snapshot! only updated after the next era update. @@ -1280,7 +1240,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Exposure is now updated. @@ -1290,7 +1250,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // Unbond almost all of the funds in stash. - Staking::unbond(RuntimeOrigin::signed(10), 1000).unwrap(); + Staking::unbond(Origin::signed(10), 1000).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1298,12 +1258,12 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] }), ); // Attempting to free the balances now will fail. 2 eras need to pass. 
- assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1311,7 +1271,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] }), ); @@ -1319,7 +1279,7 @@ fn bond_extra_and_withdraw_unbonded_works() { mock::start_active_era(3); // nothing yet - assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1327,14 +1287,14 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] }), ); // trigger next era. mock::start_active_era(5); - assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); // Now the value is free and the staking ledger is updated. assert_eq!( Staking::ledger(&10), @@ -1343,7 +1303,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 100, active: 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] }), ); }) @@ -1354,12 +1314,11 @@ fn too_many_unbond_calls_should_not_work() { ExtBuilder::default().build_and_execute(|| { let mut current_era = 0; // locked at era MaxUnlockingChunks - 1 until 3 - - for i in 0..<::MaxUnlockingChunks as Get>::get() - 1 { + for i in 0..MaxUnlockingChunks::get() - 1 { // There is only 1 chunk per era, so we need to be in a new era to create a chunk. 
current_era = i as u32; mock::start_active_era(current_era); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); + assert_ok!(Staking::unbond(Origin::signed(10), 1)); } current_era += 1; @@ -1367,24 +1326,24 @@ fn too_many_unbond_calls_should_not_work() { // This chunk is locked at `current_era` through `current_era + 2` (because BondingDuration // == 3). - assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); + assert_ok!(Staking::unbond(Origin::signed(10), 1)); assert_eq!( Staking::ledger(&10).unwrap().unlocking.len(), - <::MaxUnlockingChunks as Get>::get() as usize + MaxUnlockingChunks::get() as usize ); // can't do more. - assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); + assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); current_era += 2; mock::start_active_era(current_era); - assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); + assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); // free up everything except the most recently added chunk. - assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); assert_eq!(Staking::ledger(&10).unwrap().unlocking.len(), 1); // Can add again. - assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); + assert_ok!(Staking::unbond(Origin::signed(10), 1)); assert_eq!(Staking::ledger(&10).unwrap().unlocking.len(), 2); }) } @@ -1398,7 +1357,7 @@ fn rebond_works() { // * it can re-bond a portion of the funds scheduled to unlock. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. 
avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1414,7 +1373,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); @@ -1422,10 +1381,10 @@ fn rebond_works() { assert_eq!(active_era(), 2); // Try to rebond some funds. We get an error since no fund is unbonded. - assert_noop!(Staking::rebond(RuntimeOrigin::signed(10), 500), Error::::NoUnlockChunk); + assert_noop!(Staking::rebond(Origin::signed(10), 500), Error::::NoUnlockChunk); // Unbond almost all of the funds in stash. - Staking::unbond(RuntimeOrigin::signed(10), 900).unwrap(); + Staking::unbond(Origin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1433,12 +1392,12 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Re-bond all the funds unbonded. - Staking::rebond(RuntimeOrigin::signed(10), 900).unwrap(); + Staking::rebond(Origin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1446,12 +1405,12 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Unbond almost all of the funds in stash. 
- Staking::unbond(RuntimeOrigin::signed(10), 900).unwrap(); + Staking::unbond(Origin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1459,12 +1418,12 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Re-bond part of the funds unbonded. - Staking::rebond(RuntimeOrigin::signed(10), 500).unwrap(); + Staking::rebond(Origin::signed(10), 500).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1472,12 +1431,12 @@ fn rebond_works() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Re-bond the remainder of the funds unbonded. - Staking::rebond(RuntimeOrigin::signed(10), 500).unwrap(); + Staking::rebond(Origin::signed(10), 500).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1485,14 +1444,14 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Unbond parts of the funds in stash. - Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); - Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); - Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1500,12 +1459,12 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Re-bond part of the funds unbonded. 
- Staking::rebond(RuntimeOrigin::signed(10), 500).unwrap(); + Staking::rebond(Origin::signed(10), 500).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1513,7 +1472,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); }) @@ -1524,7 +1483,7 @@ fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1540,14 +1499,14 @@ fn rebond_is_fifo() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); mock::start_active_era(2); // Unbond some of the funds in stash. - Staking::unbond(RuntimeOrigin::signed(10), 400).unwrap(); + Staking::unbond(Origin::signed(10), 400).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1555,14 +1514,14 @@ fn rebond_is_fifo() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); mock::start_active_era(3); // Unbond more of the funds in stash. - Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1573,14 +1532,14 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, ], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); mock::start_active_era(4); // Unbond yet more of the funds in stash. 
- Staking::unbond(RuntimeOrigin::signed(10), 200).unwrap(); + Staking::unbond(Origin::signed(10), 200).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1592,12 +1551,12 @@ fn rebond_is_fifo() { UnlockChunk { value: 300, era: 3 + 3 }, UnlockChunk { value: 200, era: 4 + 3 }, ], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Re-bond half of the unbonding funds. - Staking::rebond(RuntimeOrigin::signed(10), 400).unwrap(); + Staking::rebond(Origin::signed(10), 400).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1608,7 +1567,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 100, era: 3 + 3 }, ], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); }) @@ -1620,7 +1579,7 @@ fn rebond_emits_right_value_in_event() { // and the rebond event emits the actual value rebonded. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1629,7 +1588,7 @@ fn rebond_emits_right_value_in_event() { mock::start_active_era(1); // Unbond almost all of the funds in stash. 
- Staking::unbond(RuntimeOrigin::signed(10), 900).unwrap(); + Staking::unbond(Origin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1637,12 +1596,12 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Re-bond less than the total - Staking::rebond(RuntimeOrigin::signed(10), 100).unwrap(); + Staking::rebond(Origin::signed(10), 100).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1650,14 +1609,14 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 200, unlocking: bounded_vec![UnlockChunk { value: 800, era: 1 + 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Event emitted should be correct - assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 100 }); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); // Re-bond way more than available - Staking::rebond(RuntimeOrigin::signed(10), 100_000).unwrap(); + Staking::rebond(Origin::signed(10), 100_000).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1665,11 +1624,11 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); // Event emitted should be correct, only 800 - assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 800 }); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 800)); }); } @@ -1701,7 +1660,7 @@ fn reward_to_stake_works() { total: 69, active: 69, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }, ); @@ -1747,14 +1706,11 @@ fn reap_stash_works() { // stash is not reapable assert_noop!( - Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), + Staking::reap_stash(Origin::signed(20), 11, 0), 
Error::::FundedTarget ); // controller or any other account is not reapable - assert_noop!( - Staking::reap_stash(RuntimeOrigin::signed(20), 10, 0), - Error::::NotStash - ); + assert_noop!(Staking::reap_stash(Origin::signed(20), 10, 0), Error::::NotStash); // no easy way to cause an account to go below ED, we tweak their staking ledger // instead. @@ -1765,12 +1721,12 @@ fn reap_stash_works() { total: 5, active: 5, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }, ); // reap-able - assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0)); + assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 0)); // then assert!(!>::contains_key(&10)); @@ -1787,10 +1743,7 @@ fn switching_roles() { ExtBuilder::default().nominate(false).build_and_execute(|| { // Reset reward destination for i in &[10, 20] { - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(*i), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); } assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -1801,20 +1754,16 @@ fn switching_roles() { } // add 2 nominators - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2, 2000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 5])); + assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 5])); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 500, RewardDestination::Controller)); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![21, 1])); + assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 1])); // add a new validator candidate - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 6, 1000, RewardDestination::Controller)); - assert_ok!(Staking::validate(RuntimeOrigin::signed(6), 
ValidatorPrefs::default())); - assert_ok!(Session::set_keys( - RuntimeOrigin::signed(6), - SessionKeys { other: 6.into() }, - vec![] - )); + assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); + assert_ok!(Session::set_keys(Origin::signed(6), SessionKeys { other: 6.into() }, vec![])); mock::start_active_era(1); @@ -1822,12 +1771,8 @@ fn switching_roles() { assert_eq_uvec!(validator_controllers(), vec![6, 10]); // 2 decides to be a validator. Consequences: - assert_ok!(Staking::validate(RuntimeOrigin::signed(2), ValidatorPrefs::default())); - assert_ok!(Session::set_keys( - RuntimeOrigin::signed(2), - SessionKeys { other: 2.into() }, - vec![] - )); + assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); + assert_ok!(Session::set_keys(Origin::signed(2), SessionKeys { other: 2.into() }, vec![])); // new stakes: // 10: 1000 self vote // 20: 1000 self vote + 250 vote @@ -1879,20 +1824,15 @@ fn bond_with_no_staked_value() { .build_and_execute(|| { // Can't bond with 1 assert_noop!( - Staking::bond(RuntimeOrigin::signed(1), 2, 1, RewardDestination::Controller), + Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), Error::::InsufficientBond, ); // bonded with absolute minimum value possible. - assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), - 2, - 5, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. 
- assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 1)); + assert_ok!(Staking::unbond(Origin::signed(2), 1)); assert_eq!( Staking::ledger(2), Some(StakingLedger { @@ -1900,7 +1840,7 @@ fn bond_with_no_staked_value() { active: 0, total: 5, unlocking: bounded_vec![UnlockChunk { value: 5, era: 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); @@ -1908,14 +1848,14 @@ fn bond_with_no_staked_value() { mock::start_active_era(2); // not yet removed. - assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(2), 0)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); assert!(Staking::ledger(2).is_some()); assert_eq!(Balances::locks(&1)[0].amount, 5); mock::start_active_era(3); // poof. Account 1 is removed from the staking system. - assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(2), 0)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); assert!(Staking::ledger(2).is_none()); assert_eq!(Balances::locks(&1).len(), 0); }); @@ -1929,24 +1869,16 @@ fn bond_with_little_staked_value_bounded() { .minimum_validator_count(1) .build_and_execute(|| { // setup - assert_ok!(Staking::chill(RuntimeOrigin::signed(30))); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(10), - RewardDestination::Controller - )); + assert_ok!(Staking::chill(Origin::signed(30))); + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); let init_balance_2 = Balances::free_balance(&2); let init_balance_10 = Balances::free_balance(&10); // Stingy validator. 
- assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), - 2, - 1, - RewardDestination::Controller - )); - assert_ok!(Staking::validate(RuntimeOrigin::signed(2), ValidatorPrefs::default())); + assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); assert_ok!(Session::set_keys( - RuntimeOrigin::signed(2), + Origin::signed(2), SessionKeys { other: 2.into() }, vec![] )); @@ -2019,21 +1951,11 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { let _ = Balances::make_free_balance_be(i, initial_balance); } - assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), - 2, - 1000, - RewardDestination::Controller - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 11, 11, 21, 31])); + assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31])); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(3), - 4, - 1000, - RewardDestination::Controller - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![21, 31])); + assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); // winners should be 21 and 31. Otherwise this election is taking duplicates into // account. 
@@ -2074,21 +1996,11 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { let _ = Balances::make_free_balance_be(i, initial_balance); } - assert_ok!(Staking::bond( - RuntimeOrigin::signed(1), - 2, - 1000, - RewardDestination::Controller - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 11, 11, 21])); + assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21])); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(3), - 4, - 1000, - RewardDestination::Controller - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![21])); + assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![21])); // winners should be 21 and 11. let supports = ::ElectionProvider::elect().unwrap(); @@ -2120,8 +2032,8 @@ fn phragmen_should_not_overflow() { // This is the maximum value that we can have as the outcome of CurrencyToVote. type Votes = u64; - let _ = Staking::chill(RuntimeOrigin::signed(10)); - let _ = Staking::chill(RuntimeOrigin::signed(20)); + let _ = Staking::chill(Origin::signed(10)); + let _ = Staking::chill(Origin::signed(20)); bond_validator(3, 2, Votes::max_value() as Balance); bond_validator(5, 4, Votes::max_value() as Balance); @@ -2162,7 +2074,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { ErasStakers::::insert(0, 11, &exposure); ErasStakersClipped::::insert(0, 11, exposure); ErasValidatorReward::::insert(0, stake); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 0)); assert_eq!(Balances::total_balance(&11), stake * 2); // Set staker @@ -2170,8 +2082,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { let _ = Balances::make_free_balance_be(&2, stake); // only slashes out of bonded stake are applied. 
without this line, it is 0. - Staking::bond(RuntimeOrigin::signed(2), 20000, stake - 1, RewardDestination::default()) - .unwrap(); + Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap(); // Override exposure of 11 ErasStakers::::insert( 0, @@ -2247,7 +2158,7 @@ fn unbonded_balance_is_not_slashable() { // total amount staked is slashable. assert_eq!(Staking::slashable_balance_of(&11), 1000); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 800)); + assert_ok!(Staking::unbond(Origin::signed(10), 800)); // only the active portion. assert_eq!(Staking::slashable_balance_of(&11), 200); @@ -2303,7 +2214,7 @@ fn offence_forces_new_era() { #[test] fn offence_ensures_new_era_without_clobbering() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); + assert_ok!(Staking::force_new_era_always(Origin::root())); assert_eq!(Staking::force_era(), Forcing::ForceAlways); on_offence_now( @@ -2384,7 +2295,7 @@ fn slash_in_old_span_does_not_deselect() { mock::start_active_era(2); - Staking::validate(RuntimeOrigin::signed(10), Default::default()).unwrap(); + Staking::validate(Origin::signed(10), Default::default()).unwrap(); assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(!Session::validators().contains(&11)); @@ -2643,10 +2554,10 @@ fn garbage_collection_after_slashing() { // reap_stash respects num_slashing_spans so that weight is accurate assert_noop!( - Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), + Staking::reap_stash(Origin::signed(20), 11, 0), Error::::IncorrectSlashingSpans ); - assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 2)); + assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 2)); assert!(::SlashingSpans::get(&11).is_none()); assert_eq!(::SpanSlash::get(&(11, 0)).amount(), &0); @@ -2809,7 +2720,7 @@ fn slashes_are_summed_across_spans() { assert_eq!(Balances::free_balance(21), 1900); // 21 has been 
force-chilled. re-signal intent to validate. - Staking::validate(RuntimeOrigin::signed(20), Default::default()).unwrap(); + Staking::validate(Origin::signed(20), Default::default()).unwrap(); mock::start_active_era(4); @@ -2879,9 +2790,9 @@ fn deferred_slashes_are_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 } + Event::EraPaid(3, 11075, 33225), + Event::Slashed(11, 100), + Event::Slashed(101, 12) ] ); }) @@ -2910,9 +2821,9 @@ fn retroactive_deferred_slashes_two_eras_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 7100, remainder: 21300 }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, + Event::EraPaid(3, 7100, 21300), + Event::Slashed(11, 100), + Event::Slashed(101, 12) ] ); }) @@ -2928,8 +2839,8 @@ fn retroactive_deferred_slashes_one_before() { // unbond at slash era. 
mock::start_active_era(2); - assert_ok!(Staking::chill(RuntimeOrigin::signed(10))); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 100)); + assert_ok!(Staking::chill(Origin::signed(10))); + assert_ok!(Staking::unbond(Origin::signed(10), 100)); mock::start_active_era(3); on_offence_in_era( @@ -2943,10 +2854,7 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(4); assert_eq!( staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 } - ] + vec![Event::StakersElected, Event::EraPaid(3, 11075, 33225)] ); assert_eq!(Staking::ledger(10).unwrap().total, 1000); @@ -2956,15 +2864,15 @@ fn retroactive_deferred_slashes_one_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid { era_index: 4, validator_payout: 11075, remainder: 33225 }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 } + Event::EraPaid(4, 11075, 33225), + Event::Slashed(11, 100), + Event::Slashed(101, 12) ] ); // their ledger has already been slashed. 
assert_eq!(Staking::ledger(10).unwrap().total, 900); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1000)); + assert_ok!(Staking::unbond(Origin::signed(10), 1000)); assert_eq!(Staking::ledger(10).unwrap().total, 900); }) } @@ -2990,8 +2898,8 @@ fn staker_cannot_bail_deferred_slash() { ); // now we chill - assert_ok!(Staking::chill(RuntimeOrigin::signed(100))); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(100), 500)); + assert_ok!(Staking::chill(Origin::signed(100))); + assert_ok!(Staking::unbond(Origin::signed(100), 500)); assert_eq!(Staking::current_era().unwrap(), 1); assert_eq!(active_era(), 1); @@ -3002,7 +2910,7 @@ fn staker_cannot_bail_deferred_slash() { active: 0, total: 500, stash: 101, - claimed_rewards: bounded_vec![], + claimed_rewards: Default::default(), unlocking: bounded_vec![UnlockChunk { era: 4u32, value: 500 }], } ); @@ -3026,9 +2934,7 @@ fn staker_cannot_bail_deferred_slash() { assert_eq!(active_era(), 3); // and cannot yet unbond: - assert_storage_noop!(assert!( - Staking::withdraw_unbonded(RuntimeOrigin::signed(100), 0).is_ok() - )); + assert_storage_noop!(assert!(Staking::withdraw_unbonded(Origin::signed(100), 0).is_ok())); assert_eq!( Ledger::::get(100).unwrap().unlocking.into_inner(), vec![UnlockChunk { era: 4u32, value: 500 as Balance }], @@ -3077,12 +2983,12 @@ fn remove_deferred() { // fails if empty assert_noop!( - Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![]), + Staking::cancel_deferred_slash(Origin::root(), 1, vec![]), Error::::EmptyTargets ); // cancel one of them. 
- assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0])); + assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0])); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); @@ -3102,9 +3008,9 @@ fn remove_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, - Event::Slashed { staker: 11, amount: 50 }, - Event::Slashed { staker: 101, amount: 7 } + Event::EraPaid(3, 11075, 33225), + Event::Slashed(11, 50), + Event::Slashed(101, 7) ] ); @@ -3163,21 +3069,21 @@ fn remove_multi_deferred() { // fails if list is not sorted assert_noop!( - Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]), + Staking::cancel_deferred_slash(Origin::root(), 1, vec![2, 0, 4]), Error::::NotSortedAndUnique ); // fails if list is not unique assert_noop!( - Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]), + Staking::cancel_deferred_slash(Origin::root(), 1, vec![0, 2, 2]), Error::::NotSortedAndUnique ); // fails if bad index assert_noop!( - Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]), + Staking::cancel_deferred_slash(Origin::root(), 1, vec![1, 2, 3, 4, 5]), Error::::InvalidSlashIndex ); - assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4])); + assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0, 2, 4])); let slashes = ::UnappliedSlashes::get(&4); assert_eq!(slashes.len(), 2); @@ -3227,7 +3133,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert!(nominations.submitted_in < last_slash); // actually re-bond the slashed validator - assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); mock::start_active_era(2); let exposure_11 = Staking::eras_stakers(active_era(), &11); 
@@ -3409,7 +3315,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // * double claim of one era fails ExtBuilder::default().nominate(true).build_and_execute(|| { // Consumed weight for all payout_stakers dispatches that fail - let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); let init_balance_10 = Balances::total_balance(&10); let init_balance_100 = Balances::total_balance(&100); @@ -3444,7 +3350,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert!(total_payout_2 != total_payout_0); assert!(total_payout_2 != total_payout_1); - mock::start_active_era(HistoryDepth::get() + 1); + mock::start_active_era(Staking::history_depth() + 1); let active_era = active_era(); @@ -3452,21 +3358,21 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let current_era = Staking::current_era().unwrap(); // Last kept is 1: - assert!(current_era - HistoryDepth::get() == 1); + assert!(current_era - Staking::history_depth() == 1); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0), + Staking::payout_stakers(Origin::signed(1337), 11, 0), // Fail: Era out of history Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), + Staking::payout_stakers(Origin::signed(1337), 11, 2), // Fail: Double claim Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, active_era), + Staking::payout_stakers(Origin::signed(1337), 11, active_era), // Fail: Era not finished yet 
Error::::InvalidEraToReward.with_weight(err_weight) ); @@ -3587,12 +3493,12 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( let balance = 10_000 + i as Balance; Balances::make_free_balance_be(&stash, balance); assert_ok!(Staking::bond( - RuntimeOrigin::signed(stash), + Origin::signed(stash), controller, balance, RewardDestination::Stash )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![11])); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); } mock::start_active_era(1); @@ -3616,6 +3522,25 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( }); } +#[test] +fn set_history_depth_works() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(10); + Staking::set_history_depth(Origin::root(), 20, 0).unwrap(); + assert!(::ErasTotalStake::contains_key(10 - 4)); + assert!(::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::root(), 4, 0).unwrap(); + assert!(::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::root(), 3, 0).unwrap(); + assert!(!::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::root(), 8, 0).unwrap(); + assert!(!::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + }); +} + #[test] fn test_payout_stakers() { // Test that payout_stakers work in general, including that only the top @@ -3652,7 +3577,7 @@ fn test_payout_stakers() { let pre_payout_total_issuance = Balances::total_issuance(); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); assert_eq_error_rate!( Balances::total_issuance(), pre_payout_total_issuance + actual_paid_out, @@ -3679,7 +3604,7 @@ fn test_payout_stakers() { 
total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![1] + claimed_rewards: vec![1] }) ); @@ -3693,7 +3618,7 @@ fn test_payout_stakers() { mock::start_active_era(i); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, i - 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); assert_eq_error_rate!( Balances::total_issuance(), pre_payout_total_issuance + actual_paid_out, @@ -3710,15 +3635,11 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (1..=14).collect::>().try_into().unwrap() + claimed_rewards: (1..=14).collect() }) ); - let last_era = 99; - let history_depth = HistoryDepth::get(); - let expected_last_reward_era = last_era - 1; - let expected_start_reward_era = last_era - history_depth; - for i in 16..=last_era { + for i in 16..100 { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); @@ -3726,16 +3647,8 @@ fn test_payout_stakers() { } // We clean it up as history passes - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), - 11, - expected_start_reward_era - )); - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), - 11, - expected_last_reward_era - )); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -3743,14 +3656,14 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![expected_start_reward_era, expected_last_reward_era] + claimed_rewards: vec![15, 98] }) ); // Out of order claims works. 
- assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 69)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 23)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 42)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 69)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 23)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -3758,13 +3671,7 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![ - expected_start_reward_era, - 23, - 42, - 69, - expected_last_reward_era - ] + claimed_rewards: vec![15, 23, 42, 69, 98] }) ); }); @@ -3775,7 +3682,7 @@ fn payout_stakers_handles_basic_errors() { // Here we will test payouts handle all errors. ExtBuilder::default().has_stakers(false).build_and_execute(|| { // Consumed weight for all payout_stakers dispatches that fail - let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); // Same setup as the test above let balance = 1000; @@ -3796,56 +3703,41 @@ fn payout_stakers_handles_basic_errors() { // Wrong Era, too big assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), + Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward.with_weight(err_weight) ); // Wrong Staker assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 10, 1), + Staking::payout_stakers(Origin::signed(1337), 10, 1), Error::::NotStash.with_weight(err_weight) ); - let last_era = 99; - for i in 3..=last_era { + for i in 3..100 { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. 
let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(i); } - - let history_depth = HistoryDepth::get(); - let expected_last_reward_era = last_era - 1; - let expected_start_reward_era = last_era - history_depth; - - // We are at era last_era=99. Given history_depth=80, we should be able - // to payout era starting from expected_start_reward_era=19 through - // expected_last_reward_era=98 (80 total eras), but not 18 or 99. + // We are at era 99, with history depth of 84 + // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era - 1), + Staking::payout_stakers(Origin::signed(1337), 11, 14), Error::::InvalidEraToReward.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era + 1), + Staking::payout_stakers(Origin::signed(1337), 11, 99), Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), - 11, - expected_start_reward_era - )); - assert_ok!(Staking::payout_stakers( - RuntimeOrigin::signed(1337), - 11, - expected_last_reward_era - )); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); // Can't claim again assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era), + Staking::payout_stakers(Origin::signed(1337), 11, 15), Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era), + Staking::payout_stakers(Origin::signed(1337), 11, 98), Error::::AlreadyClaimed.with_weight(err_weight) ); }); @@ -3870,9 +3762,9 @@ fn payout_stakers_handles_weight_refund() { let half_max_nom_rewarded_weight = ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); let 
zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); - assert!(zero_nom_payouts_weight.any_gt(Weight::zero())); - assert!(half_max_nom_rewarded_weight.any_gt(zero_nom_payouts_weight)); - assert!(max_nom_rewarded_weight.any_gt(half_max_nom_rewarded_weight)); + assert!(zero_nom_payouts_weight > 0); + assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); + assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight); let balance = 1000; bond_validator(11, 10, balance); @@ -3893,9 +3785,10 @@ fn payout_stakers_handles_weight_refund() { start_active_era(2); // Collect payouts when there are no nominators - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(20)); + let result = call.dispatch(Origin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); @@ -3906,9 +3799,10 @@ fn payout_stakers_handles_weight_refund() { start_active_era(3); // Collect payouts for an era where the validator did not receive any points. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(20)); + let result = call.dispatch(Origin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); @@ -3919,9 +3813,10 @@ fn payout_stakers_handles_weight_refund() { start_active_era(4); // Collect payouts when the validator has `half_max_nom_rewarded` nominators. 
- let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(20)); + let result = call.dispatch(Origin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), half_max_nom_rewarded_weight); @@ -3942,16 +3837,18 @@ fn payout_stakers_handles_weight_refund() { start_active_era(6); // Collect payouts when the validator had `half_max_nom_rewarded` nominators. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(20)); + let result = call.dispatch(Origin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); // Try and collect payouts for an era that has already been collected. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(20)); + let result = call.dispatch(Origin::signed(20)); assert!(result.is_err()); // When there is an error the consumed weight == weight when there are 0 nominator payouts. 
assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); @@ -3970,7 +3867,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }) ); mock::start_active_era(5); @@ -3982,14 +3879,10 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (0..5).collect::>().try_into().unwrap(), + claimed_rewards: (0..5).collect(), }) ); - - // make sure only era upto history depth is stored - let current_era = 99; - let last_reward_era = 99 - HistoryDepth::get(); - mock::start_active_era(current_era); + mock::start_active_era(99); bond_validator(13, 12, 1000); assert_eq!( Staking::ledger(&12), @@ -3998,10 +3891,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (last_reward_era..current_era) - .collect::>() - .try_into() - .unwrap(), + claimed_rewards: (15..99).collect(), }) ); }); @@ -4011,68 +3901,42 @@ fn bond_during_era_correctly_populates_claimed_rewards() { fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write - let zero_offence_weight = - ::DbWeight::get().reads_writes(4, 1); - assert_eq!( - Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), - zero_offence_weight - ); + let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); + assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), zero_offence_weight); // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes - let n_offence_unapplied_weight = ::DbWeight::get() - .reads_writes(4, 1) + - ::DbWeight::get().reads_writes(4, 5); - - let offenders: Vec< - OffenceDetails< - ::AccountId, - 
pallet_session::historical::IdentificationTuple, - >, - > = (1..10) - .map(|i| OffenceDetails { - offender: (i, Staking::eras_stakers(active_era(), i)), - reporters: vec![], - }) - .collect(); - assert_eq!( - Staking::on_offence( - &offenders, - &[Perbill::from_percent(50)], - 0, - DisableStrategy::WhenSlashed - ), - n_offence_unapplied_weight - ); + let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(4, 5); + + let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> + = (1..10).map(|i| + OffenceDetails { + offender: (i, Staking::eras_stakers(active_era(), i)), + reporters: vec![], + } + ).collect(); + assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), n_offence_unapplied_weight); // On Offence with one offenders, Applied - let one_offender = [OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), - reporters: vec![1], - }]; + let one_offender = [ + OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![1], + }, + ]; let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes - let one_offence_unapplied_weight = - ::DbWeight::get().reads_writes(4, 1) - + - ::DbWeight::get().reads_writes(rw, rw) + let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(rw, rw) // One `slash_cost` + ::DbWeight::get().reads_writes(6, 5) // `slash_cost` * nominators (1) + ::DbWeight::get().reads_writes(6, 5) // `reward_cost` * reporters (1) - + ::DbWeight::get().reads_writes(2, 2) - ; + + ::DbWeight::get().reads_writes(2, 2); - assert_eq!( - Staking::on_offence( - &one_offender, - &[Perbill::from_percent(50)], - 0, - DisableStrategy::WhenSlashed{} - ), - one_offence_unapplied_weight - ); + assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), one_offence_unapplied_weight); 
}); } @@ -4087,7 +3951,7 @@ fn payout_creates_controller() { bond_nominator(1234, 1337, 100, vec![11]); // kill controller - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1337), 1234, 100)); + assert_ok!(Balances::transfer(Origin::signed(1337), 1234, 100)); assert_eq!(Balances::free_balance(1337), 0); mock::start_active_era(1); @@ -4095,7 +3959,7 @@ fn payout_creates_controller() { // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Controller is created assert!(Balances::free_balance(1337) > 0); @@ -4113,7 +3977,7 @@ fn payout_to_any_account_works() { bond_nominator(1234, 1337, 100, vec![11]); // Update payout location - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(1337), RewardDestination::Account(42))); + assert_ok!(Staking::set_payee(Origin::signed(1337), RewardDestination::Account(42))); // Reward Destination account doesn't exist assert_eq!(Balances::free_balance(42), 0); @@ -4123,7 +3987,7 @@ fn payout_to_any_account_works() { // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Payment is successful assert!(Balances::free_balance(42) > 0); @@ -4246,13 +4110,13 @@ fn cannot_rebond_to_lower_than_ed() { total: 10 * 1000, active: 10 * 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] } ); // unbond all of it. must be chilled first. 
- assert_ok!(Staking::chill(RuntimeOrigin::signed(20))); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(20), 10 * 1000)); + assert_ok!(Staking::chill(Origin::signed(20))); + assert_ok!(Staking::unbond(Origin::signed(20), 10 * 1000)); assert_eq!( Staking::ledger(&20).unwrap(), StakingLedger { @@ -4260,15 +4124,12 @@ fn cannot_rebond_to_lower_than_ed() { total: 10 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 10 * 1000, era: 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] } ); // now bond a wee bit more - assert_noop!( - Staking::rebond(RuntimeOrigin::signed(20), 5), - Error::::InsufficientBond - ); + assert_noop!(Staking::rebond(Origin::signed(20), 5), Error::::InsufficientBond); }) } @@ -4286,13 +4147,13 @@ fn cannot_bond_extra_to_lower_than_ed() { total: 10 * 1000, active: 10 * 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] } ); // unbond all of it. must be chilled first. - assert_ok!(Staking::chill(RuntimeOrigin::signed(20))); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(20), 10 * 1000)); + assert_ok!(Staking::chill(Origin::signed(20))); + assert_ok!(Staking::unbond(Origin::signed(20), 10 * 1000)); assert_eq!( Staking::ledger(&20).unwrap(), StakingLedger { @@ -4300,13 +4161,13 @@ fn cannot_bond_extra_to_lower_than_ed() { total: 10 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 10 * 1000, era: 3 }], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] } ); // now bond a wee bit more assert_noop!( - Staking::bond_extra(RuntimeOrigin::signed(21), 5), + Staking::bond_extra(Origin::signed(21), 5), Error::::InsufficientBond, ); }) @@ -4327,14 +4188,14 @@ fn do_not_die_when_active_is_ed() { total: 1000 * ed, active: 1000 * ed, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] } ); // when unbond all of it except ed. 
- assert_ok!(Staking::unbond(RuntimeOrigin::signed(20), 999 * ed)); + assert_ok!(Staking::unbond(Origin::signed(20), 999 * ed)); start_active_era(3); - assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(20), 100)); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(20), 100)); // then assert_eq!( @@ -4344,7 +4205,7 @@ fn do_not_die_when_active_is_ed() { total: ed, active: ed, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + claimed_rewards: vec![] } ); }) @@ -4354,7 +4215,7 @@ fn do_not_die_when_active_is_ed() { fn on_finalize_weight_is_nonzero() { ExtBuilder::default().build_and_execute(|| { let on_finalize_weight = ::DbWeight::get().reads(1); - assert!(>::on_initialize(1).all_gte(on_finalize_weight)); + assert!(>::on_initialize(1) >= on_finalize_weight); }) } @@ -4365,8 +4226,8 @@ mod election_data_provider { #[test] fn targets_2sec_block() { let mut validators = 1000; - while ::WeightInfo::get_npos_targets(validators) - .all_lt(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND) + while ::WeightInfo::get_npos_targets(validators) < + 2 * frame_support::weights::constants::WEIGHT_PER_SECOND { validators += 1; } @@ -4383,8 +4244,8 @@ mod election_data_provider { let slashing_spans = validators; let mut nominators = 1000; - while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) - .all_lt(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND) + while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) < + 2 * frame_support::weights::constants::WEIGHT_PER_SECOND { nominators += 1; } @@ -4437,7 +4298,7 @@ mod election_data_provider { ); // resubmit and it is back - assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![11, 21])); + assert_ok!(Staking::nominate(Origin::signed(100), vec![11, 21])); assert_eq!( ::electing_voters(None) .unwrap() @@ -4595,13 +4456,13 @@ mod election_data_provider { assert_eq!(staking_events().len(), 3); assert_eq!(*staking_events().last().unwrap(), 
Event::StakersElected); - Staking::force_no_eras(RuntimeOrigin::root()).unwrap(); + Staking::force_no_eras(Origin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), u64::MAX); - Staking::force_new_era_always(RuntimeOrigin::root()).unwrap(); + Staking::force_new_era_always(Origin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); - Staking::force_new_era(RuntimeOrigin::root()).unwrap(); + Staking::force_new_era(Origin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); // Do a fail election @@ -4651,51 +4512,40 @@ fn min_bond_checks_work() { .min_validator_bond(1_500) .build_and_execute(|| { // 500 is not enough for any role - assert_ok!(Staking::bond( - RuntimeOrigin::signed(3), - 4, - 500, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); assert_noop!( - Staking::nominate(RuntimeOrigin::signed(4), vec![1]), + Staking::nominate(Origin::signed(4), vec![1]), Error::::InsufficientBond ); assert_noop!( - Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default()), + Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond, ); // 1000 is enough for nominator - assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(3), 500)); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); + assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); assert_noop!( - Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default()), + Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond, ); // 1500 is enough for validator - assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(3), 500)); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); - assert_ok!(Staking::validate(RuntimeOrigin::signed(4), 
ValidatorPrefs::default())); + assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); // Can't unbond anything as validator - assert_noop!( - Staking::unbond(RuntimeOrigin::signed(4), 500), - Error::::InsufficientBond - ); + assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); // Once they are a nominator, they can unbond 500 - assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 500)); - assert_noop!( - Staking::unbond(RuntimeOrigin::signed(4), 500), - Error::::InsufficientBond - ); + assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::unbond(Origin::signed(4), 500)); + assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); // Once they are chilled they can unbond everything - assert_ok!(Staking::chill(RuntimeOrigin::signed(4))); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 1000)); + assert_ok!(Staking::chill(Origin::signed(4))); + assert_ok!(Staking::unbond(Origin::signed(4), 1000)); }) } @@ -4721,21 +4571,21 @@ fn chill_other_works() { // Nominator assert_ok!(Staking::bond( - RuntimeOrigin::signed(a), + Origin::signed(a), b, 1000, RewardDestination::Controller )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(b), vec![1])); + assert_ok!(Staking::nominate(Origin::signed(b), vec![1])); // Validator assert_ok!(Staking::bond( - RuntimeOrigin::signed(c), + Origin::signed(c), d, 1500, RewardDestination::Controller )); - assert_ok!(Staking::validate(RuntimeOrigin::signed(d), ValidatorPrefs::default())); + assert_ok!(Staking::validate(Origin::signed(d), ValidatorPrefs::default())); } // To chill other users, we need to: @@ -4748,17 +4598,17 @@ fn chill_other_works() { // Can't chill these users assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 1), 
+ Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 3), + Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther ); // Change the minimum bond... but no limits. assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Set(1_500), ConfigOp::Set(2_000), ConfigOp::Remove, @@ -4769,17 +4619,17 @@ fn chill_other_works() { // Still can't chill these users assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 1), + Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 3), + Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther ); // Add limits, but no threshold assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Set(10), @@ -4790,17 +4640,17 @@ fn chill_other_works() { // Still can't chill these users assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 1), + Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 3), + Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther ); // Add threshold, but no limits assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Remove, @@ -4811,17 +4661,17 @@ fn chill_other_works() { // Still can't chill these users assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 1), + Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 3), + Staking::chill_other(Origin::signed(1337), 3), Error::::CannotChillOther ); // Add threshold and limits assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), 
ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Set(10), @@ -4839,19 +4689,19 @@ fn chill_other_works() { for i in 6..15 { let b = 4 * i + 1; let d = 4 * i + 3; - assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), b)); - assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), d)); + assert_ok!(Staking::chill_other(Origin::signed(1337), b)); + assert_ok!(Staking::chill_other(Origin::signed(1337), d)); } // chill a nominator. Limit is not reached, not chill-able assert_eq!(Nominators::::count(), 7); assert_noop!( - Staking::chill_other(RuntimeOrigin::signed(1337), 1), + Staking::chill_other(Origin::signed(1337), 1), Error::::CannotChillOther ); // chill a validator. Limit is reached, chill-able. assert_eq!(Validators::::count(), 9); - assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), 3)); + assert_ok!(Staking::chill_other(Origin::signed(1337), 3)); }) } @@ -4866,7 +4716,7 @@ fn capped_stakers_works() { // Change the maximums let max = 10; assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Set(10), ConfigOp::Set(10), ConfigOp::Set(max), @@ -4884,10 +4734,7 @@ fn capped_stakers_works() { RewardDestination::Controller, ) .unwrap(); - assert_ok!(Staking::validate( - RuntimeOrigin::signed(controller), - ValidatorPrefs::default() - )); + assert_ok!(Staking::validate(Origin::signed(controller), ValidatorPrefs::default())); some_existing_validator = controller; } @@ -4900,7 +4747,7 @@ fn capped_stakers_works() { .unwrap(); assert_noop!( - Staking::validate(RuntimeOrigin::signed(last_validator), ValidatorPrefs::default()), + Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), Error::::TooManyValidators, ); @@ -4913,7 +4760,7 @@ fn capped_stakers_works() { RewardDestination::Controller, ) .unwrap(); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![1])); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); some_existing_nominator = controller; } @@ 
-4925,21 +4772,21 @@ fn capped_stakers_works() { ) .unwrap(); assert_noop!( - Staking::nominate(RuntimeOrigin::signed(last_nominator), vec![1]), + Staking::nominate(Origin::signed(last_nominator), vec![1]), Error::::TooManyNominators ); // Re-nominate works fine - assert_ok!(Staking::nominate(RuntimeOrigin::signed(some_existing_nominator), vec![1])); + assert_ok!(Staking::nominate(Origin::signed(some_existing_nominator), vec![1])); // Re-validate works fine assert_ok!(Staking::validate( - RuntimeOrigin::signed(some_existing_validator), + Origin::signed(some_existing_validator), ValidatorPrefs::default() )); // No problem when we set to `None` again assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Remove, @@ -4947,11 +4794,8 @@ fn capped_stakers_works() { ConfigOp::Noop, ConfigOp::Noop, )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(last_nominator), vec![1])); - assert_ok!(Staking::validate( - RuntimeOrigin::signed(last_validator), - ValidatorPrefs::default() - )); + assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); + assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); }) } @@ -4960,21 +4804,21 @@ fn min_commission_works() { ExtBuilder::default().build_and_execute(|| { // account 10 controls the stash from account 11 assert_ok!(Staking::validate( - RuntimeOrigin::signed(10), + Origin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } )); // event emitted should be correct assert_eq!( *staking_events().last().unwrap(), - Event::ValidatorPrefsSet { - stash: 11, - prefs: ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } - } + Event::ValidatorPrefsSet( + 11, + ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + ) ); assert_ok!(Staking::set_staking_configs( - RuntimeOrigin::root(), + Origin::root(), ConfigOp::Remove, ConfigOp::Remove, 
ConfigOp::Remove, @@ -4986,7 +4830,7 @@ fn min_commission_works() { // can't make it less than 10 now assert_noop!( Staking::validate( - RuntimeOrigin::signed(10), + Origin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } ), Error::::CommissionTooLow @@ -4994,12 +4838,12 @@ fn min_commission_works() { // can only change to higher. assert_ok!(Staking::validate( - RuntimeOrigin::signed(10), + Origin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(10), blocked: false } )); assert_ok!(Staking::validate( - RuntimeOrigin::signed(10), + Origin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(15), blocked: false } )); }) @@ -5082,7 +4926,7 @@ fn change_of_max_nominations() { assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 1); // now one of them can revive themselves by re-nominating to a proper value. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1])); + assert_ok!(Staking::nominate(Origin::signed(71), vec![1])); assert_eq!( Nominators::::iter() .map(|(k, n)| (k, n.targets.len())) @@ -5093,7 +4937,7 @@ fn change_of_max_nominations() { // or they can be chilled by any account. 
assert!(Nominators::::contains_key(101)); assert!(Nominators::::get(101).is_none()); - assert_ok!(Staking::chill_other(RuntimeOrigin::signed(70), 100)); + assert_ok!(Staking::chill_other(Origin::signed(70), 100)); assert!(!Nominators::::contains_key(101)); assert!(Nominators::::get(101).is_none()); }) @@ -5117,7 +4961,7 @@ mod sorted_list_provider { ); // when account 101 renominates - assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![41])); + assert_ok!(Staking::nominate(Origin::signed(100), vec![41])); // then counts don't change assert_eq!(::VoterList::count(), pre_insert_voter_count); @@ -5140,7 +4984,7 @@ mod sorted_list_provider { assert_eq!(::VoterList::iter().collect::>(), vec![11, 21, 31]); // when account 11 re-validates - assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); // then counts don't change assert_eq!(::VoterList::count(), pre_insert_voter_count); @@ -5155,31 +4999,31 @@ fn force_apply_min_commission_works() { let prefs = |c| ValidatorPrefs { commission: Perbill::from_percent(c), blocked: false }; let validators = || Validators::::iter().collect::>(); ExtBuilder::default().build_and_execute(|| { - assert_ok!(Staking::validate(RuntimeOrigin::signed(30), prefs(10))); - assert_ok!(Staking::validate(RuntimeOrigin::signed(20), prefs(5))); + assert_ok!(Staking::validate(Origin::signed(30), prefs(10))); + assert_ok!(Staking::validate(Origin::signed(20), prefs(5))); // Given assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); MinCommission::::set(Perbill::from_percent(5)); // When applying to a commission greater than min - assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 31)); + assert_ok!(Staking::force_apply_min_commission(Origin::signed(1), 31)); // Then the commission is not changed assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); // When applying to a 
commission that is equal to min - assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 21)); + assert_ok!(Staking::force_apply_min_commission(Origin::signed(1), 21)); // Then the commission is not changed assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); // When applying to a commission that is less than the min - assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 11)); + assert_ok!(Staking::force_apply_min_commission(Origin::signed(1), 11)); // Then the commission is bumped to the min assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(5))]); // When applying commission to a validator that doesn't exist then storage is not altered assert_noop!( - Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 420), + Staking::force_apply_min_commission(Origin::signed(1), 420), Error::::NotStash ); }); @@ -5195,7 +5039,7 @@ fn proportional_slash_stop_slashing_if_remaining_zero() { active: 20, // we have some chunks, but they are not affected. unlocking: bounded_vec![c(1, 10), c(2, 10)], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }; assert_eq!(BondingDuration::get(), 3); @@ -5213,7 +5057,7 @@ fn proportional_ledger_slash_works() { total: 10, active: 10, unlocking: bounded_vec![], - claimed_rewards: bounded_vec![], + claimed_rewards: vec![], }; assert_eq!(BondingDuration::get(), 3); @@ -5258,18 +5102,6 @@ fn proportional_ledger_slash_works() { assert_eq!(LedgerSlashPerEra::get().0, 0); assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 30), (7, 30)])); - // Given - ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)]; - ledger.total = 4 * 100; - ledger.active = 0; - // When the first 2 chunks don't overlap with the affected range of unlock eras. 
- assert_eq!(ledger.slash(15, 0, 3), 15); - // Then - assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 100 - 8), c(7, 100 - 7)]); - assert_eq!(ledger.total, 4 * 100 - 15); - assert_eq!(LedgerSlashPerEra::get().0, 0); - assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 92), (7, 93)])); - // Given ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)]; ledger.active = 500; @@ -5392,7 +5224,6 @@ fn proportional_ledger_slash_works() { assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0), (6, 0), (7, 0)])); // Given - use sp_runtime::PerThing as _; let slash = u64::MAX as Balance * 2; let value = u64::MAX as Balance * 2; let unit = 100; @@ -5405,19 +5236,18 @@ fn proportional_ledger_slash_works() { ledger.active = unit; ledger.total = unit * 4 + value; // When - assert_eq!(ledger.slash(slash, 0, 0), slash - 5); + assert_eq!(ledger.slash(slash, 0, 0), slash - 43); // Then // The amount slashed out of `unit` let affected_balance = value + unit * 4; - let ratio = - Perquintill::from_rational_with_rounding(slash, affected_balance, Rounding::Up).unwrap(); + let ratio = Perquintill::from_rational(slash, affected_balance); // `unit` after the slash is applied let unit_slashed = { - let unit_slash = ratio.mul_ceil(unit); + let unit_slash = ratio * unit; unit - unit_slash }; let value_slashed = { - let value_slash = ratio.mul_ceil(value); + let value_slash = ratio * value; value - value_slash }; assert_eq!(ledger.active, unit_slashed); @@ -5429,252 +5259,3 @@ fn proportional_ledger_slash_works() { BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 0)]) ); } - -#[test] -fn pre_bonding_era_cannot_be_claimed() { - // Verifies initial conditions of mock - ExtBuilder::default().nominate(false).build_and_execute(|| { - let history_depth = HistoryDepth::get(); - // jump to some era above history_depth - let mut current_era = history_depth + 10; - let last_reward_era = current_era - 1; - let start_reward_era = current_era - 
history_depth; - - // put some money in stash=3 and controller=4. - for i in 3..5 { - let _ = Balances::make_free_balance_be(&i, 2000); - } - - mock::start_active_era(current_era); - - // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 1500, RewardDestination::Controller)); - - let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(&4).unwrap(), - StakingLedger { - stash: 3, - total: 1500, - active: 1500, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // start next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // claiming reward for last era in which validator was active works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 1)); - - // consumed weight for all payout_stakers dispatches that fail - let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); - // cannot claim rewards for an era before bonding occured as it is - // already marked as claimed. 
- assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 2), - Error::::AlreadyClaimed.with_weight(err_weight) - ); - - // decoding will fail now since Staking Ledger is in corrupt state - HistoryDepth::set(history_depth - 1); - assert_eq!(Staking::ledger(&4), None); - - // make sure stakers still cannot claim rewards that they are not meant to - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 2), - Error::::NotController - ); - - // fix the corrupted state for post conditions check - HistoryDepth::set(history_depth); - }); -} - -#[test] -fn reducing_history_depth_abrupt() { - // Verifies initial conditions of mock - ExtBuilder::default().nominate(false).build_and_execute(|| { - let original_history_depth = HistoryDepth::get(); - let mut current_era = original_history_depth + 10; - let last_reward_era = current_era - 1; - let start_reward_era = current_era - original_history_depth; - - // put some money in (stash, controller)=(3,4),(5,6). - for i in 3..7 { - let _ = Balances::make_free_balance_be(&i, 2000); - } - - // start current era - mock::start_active_era(current_era); - - // add a new candidate for being a staker. account 3 controlled by 4. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 1500, RewardDestination::Controller)); - - // all previous era before the bonding action should be marked as - // claimed. 
- let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(&4).unwrap(), - StakingLedger { - stash: 3, - total: 1500, - active: 1500, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // claiming reward for last era in which validator was active works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 1)); - - // next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // history_depth reduced without migration - let history_depth = original_history_depth - 1; - HistoryDepth::set(history_depth); - // claiming reward does not work anymore - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 1), - Error::::NotController - ); - - // new stakers can still bond - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 6, 1200, RewardDestination::Controller)); - - // new staking ledgers created will be bounded by the current history depth - let last_reward_era = current_era - 1; - let start_reward_era = current_era - history_depth; - let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(&6).unwrap(), - StakingLedger { - stash: 5, - total: 1200, - active: 1200, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // fix the corrupted state for post conditions check - HistoryDepth::set(original_history_depth); - }); -} - -#[test] -fn reducing_max_unlocking_chunks_abrupt() { - // Concern is on validators only - // By Default 11, 10 are stash and ctrl and 21,20 - ExtBuilder::default().build_and_execute(|| { - // given a staker at era=10 and MaxUnlockChunks set to 2 - MaxUnlockingChunks::set(2); - start_active_era(10); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 300, 
RewardDestination::Staked)); - assert!(matches!(Staking::ledger(4), Some(_))); - - // when staker unbonds - assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 20)); - - // then an unlocking chunk is added at `current_era + bonding_duration` - // => 10 + 3 = 13 - let expected_unlocking: BoundedVec, MaxUnlockingChunks> = - bounded_vec![UnlockChunk { value: 20 as Balance, era: 13 as EraIndex }]; - assert!(matches!(Staking::ledger(4), - Some(StakingLedger { - unlocking, - .. - }) if unlocking==expected_unlocking)); - - // when staker unbonds at next era - start_active_era(11); - assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 50)); - // then another unlock chunk is added - let expected_unlocking: BoundedVec, MaxUnlockingChunks> = - bounded_vec![UnlockChunk { value: 20, era: 13 }, UnlockChunk { value: 50, era: 14 }]; - assert!(matches!(Staking::ledger(4), - Some(StakingLedger { - unlocking, - .. - }) if unlocking==expected_unlocking)); - - // when staker unbonds further - start_active_era(12); - // then further unbonding not possible - assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NoMoreChunks); - - // when max unlocking chunks is reduced abruptly to a low value - MaxUnlockingChunks::set(1); - // then unbond, rebond ops are blocked with ledger in corrupt state - assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NotController); - assert_noop!(Staking::rebond(RuntimeOrigin::signed(4), 100), Error::::NotController); - - // reset the ledger corruption - MaxUnlockingChunks::set(2); - }) -} - -#[test] -fn cannot_set_unsupported_validator_count() { - ExtBuilder::default().build_and_execute(|| { - MaxWinners::set(50); - // set validator count works - assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 30)); - assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 50)); - // setting validator count above 100 does not work - assert_noop!( - Staking::set_validator_count(RuntimeOrigin::root(), 51), - 
Error::::TooManyValidators, - ); - }) -} - -#[test] -fn increase_validator_count_errors() { - ExtBuilder::default().build_and_execute(|| { - MaxWinners::set(50); - assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 40)); - - // increase works - assert_ok!(Staking::increase_validator_count(RuntimeOrigin::root(), 6)); - assert_eq!(ValidatorCount::::get(), 46); - - // errors - assert_noop!( - Staking::increase_validator_count(RuntimeOrigin::root(), 5), - Error::::TooManyValidators, - ); - }) -} - -#[test] -fn scale_validator_count_errors() { - ExtBuilder::default().build_and_execute(|| { - MaxWinners::set(50); - assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 20)); - - // scale value works - assert_ok!(Staking::scale_validator_count( - RuntimeOrigin::root(), - Percent::from_percent(200) - )); - assert_eq!(ValidatorCount::::get(), 40); - - // errors - assert_noop!( - Staking::scale_validator_count(RuntimeOrigin::root(), Percent::from_percent(126)), - Error::::TooManyValidators, - ); - }) -} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 56374ffbc4b62..1bdfb01bddc86 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/staking/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/staking/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -68,6 +65,7 @@ pub trait WeightInfo { fn payout_stakers_dead_controller(n: u32, ) -> Weight; fn payout_stakers_alive_staked(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; + fn set_history_depth(e: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; fn new_era(v: u32, n: u32, ) -> Weight; fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; @@ -84,24 +82,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - // Minimum execution time: 53_097 nanoseconds. - Weight::from_ref_time(53_708_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (43_992_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - // Minimum execution time: 92_199 nanoseconds. 
- Weight::from_ref_time(93_541_000 as u64) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().writes(7 as u64)) + (75_827_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(7 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -109,27 +106,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) + // Storage: BagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - // Minimum execution time: 98_227 nanoseconds. - Weight::from_ref_time(99_070_000 as u64) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().writes(8 as u64)) + (81_075_000 as Weight) + .saturating_add(T::DbWeight::get().reads(12 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 45_058 nanoseconds. 
- Weight::from_ref_time(46_592_713 as u64) - // Standard Error: 413 - .saturating_add(Weight::from_ref_time(63_036 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (35_763_000 as Weight) + // Standard Error: 0 + .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -138,18 +132,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) - /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - // Minimum execution time: 86_087 nanoseconds. 
- Weight::from_ref_time(87_627_894 as u64) - .saturating_add(T::DbWeight::get().reads(13 as u64)) - .saturating_add(T::DbWeight::get().writes(11 as u64)) + (66_938_000 as Weight) + .saturating_add(T::DbWeight::get().reads(13 as Weight)) + .saturating_add(T::DbWeight::get().writes(11 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -158,27 +150,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxValidatorsCount (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - // Minimum execution time: 67_690 nanoseconds. - Weight::from_ref_time(68_348_000 as u64) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + (52_943_000 as Weight) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) - /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - // Minimum execution time: 43_512 nanoseconds. 
- Weight::from_ref_time(47_300_477 as u64) - // Standard Error: 11_609 - .saturating_add(Weight::from_ref_time(6_770_405 as u64).saturating_mul(k as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(k as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) + (23_264_000 as Weight) + // Standard Error: 11_000 + .saturating_add((8_006_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -187,170 +176,168 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 74_296 nanoseconds. 
- Weight::from_ref_time(73_201_782 as u64) - // Standard Error: 5_007 - .saturating_add(Weight::from_ref_time(2_810_370 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (56_596_000 as Weight) + // Standard Error: 14_000 + .saturating_add((3_644_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(12 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 66_605 nanoseconds. - Weight::from_ref_time(67_279_000 as u64) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (51_117_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - // Minimum execution time: 18_897 nanoseconds. 
- Weight::from_ref_time(19_357_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (11_223_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - // Minimum execution time: 26_509 nanoseconds. - Weight::from_ref_time(26_961_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (19_826_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - // Minimum execution time: 5_025 nanoseconds. - Weight::from_ref_time(5_240_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_789_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - // Minimum execution time: 5_107 nanoseconds. - Weight::from_ref_time(5_320_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_793_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - // Minimum execution time: 5_094 nanoseconds. - Weight::from_ref_time(5_377_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_802_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - // Minimum execution time: 5_219 nanoseconds. 
- Weight::from_ref_time(5_434_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (3_762_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking Invulnerables (r:0 w:1) - /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - // Minimum execution time: 5_122 nanoseconds. - Weight::from_ref_time(5_977_533 as u64) - // Standard Error: 34 - .saturating_add(Weight::from_ref_time(10_205 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (4_318_000 as Weight) + // Standard Error: 0 + .saturating_add((10_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) - /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - // Minimum execution time: 80_216 nanoseconds. 
- Weight::from_ref_time(86_090_609 as u64) - // Standard Error: 2_006 - .saturating_add(Weight::from_ref_time(1_039_308 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (65_265_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_029_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) - /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - // Minimum execution time: 92_034 nanoseconds. - Weight::from_ref_time(896_585_370 as u64) - // Standard Error: 58_231 - .saturating_add(Weight::from_ref_time(4_908_277 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (903_312_000 as Weight) + // Standard Error: 56_000 + .saturating_add((4_968_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:1 w:0) - // Storage: System Account (r:1 w:1) - /// The range of component `n` is `[0, 256]`. 
+ // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Minimum execution time: 127_936 nanoseconds. - Weight::from_ref_time(184_556_084 as u64) - // Standard Error: 26_981 - .saturating_add(Weight::from_ref_time(21_786_304 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(9 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + (87_569_000 as Weight) + // Standard Error: 14_000 + .saturating_add((24_232_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:2 w:2) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:1 w:0) - // Storage: System Account (r:1 w:1) - // Storage: Balances Locks (r:1 w:1) - /// The range of component `n` is `[0, 256]`. + // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - // Minimum execution time: 157_778 nanoseconds. 
- Weight::from_ref_time(223_306_359 as u64) - // Standard Error: 27_216 - .saturating_add(Weight::from_ref_time(30_612_663 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(10 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(n as u64))) + (98_839_000 as Weight) + // Standard Error: 21_000 + .saturating_add((34_480_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) + // Storage: BagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListBags (r:2 w:2) - /// The range of component `l` is `[1, 32]`. + // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - // Minimum execution time: 92_880 nanoseconds. 
- Weight::from_ref_time(94_434_663 as u64) - // Standard Error: 1_734 - .saturating_add(Weight::from_ref_time(34_453 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(9 as u64)) - .saturating_add(T::DbWeight::get().writes(8 as u64)) + (74_865_000 as Weight) + // Standard Error: 3_000 + .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(9 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:1) + // Storage: Staking ErasStakersClipped (r:0 w:2) + // Storage: Staking ErasValidatorPrefs (r:0 w:2) + // Storage: Staking ErasValidatorReward (r:0 w:1) + // Storage: Staking ErasRewardPoints (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:2) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + fn set_history_depth(e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 62_000 + .saturating_add((22_829_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -359,26 +346,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) - /// The range of component `s` is `[1, 
100]`. fn reap_stash(s: u32, ) -> Weight { - // Minimum execution time: 92_334 nanoseconds. - Weight::from_ref_time(95_207_614 as u64) - // Standard Error: 1_822 - .saturating_add(Weight::from_ref_time(1_036_787 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) - } - // Storage: VoterList CounterForListNodes (r:1 w:0) + (70_933_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_021_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(12 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: VoterList ListBags (r:200 w:0) - // Storage: VoterList ListNodes (r:101 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking Bonded (r:101 w:0) @@ -387,59 +372,52 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ValidatorCount (r:1 w:0) // Storage: Staking MinimumValidatorCount (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:1) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasStakersClipped (r:0 w:1) // Storage: Staking ErasValidatorPrefs (r:0 w:1) // Storage: Staking ErasStakers (r:0 w:1) // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// The range of component `v` is `[1, 10]`. - /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 535_169 nanoseconds. 
- Weight::from_ref_time(548_667_000 as u64) - // Standard Error: 1_759_252 - .saturating_add(Weight::from_ref_time(58_283_319 as u64).saturating_mul(v as u64)) - // Standard Error: 175_299 - .saturating_add(Weight::from_ref_time(13_578_512 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(207 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) - } - // Storage: VoterList CounterForListNodes (r:1 w:0) + (0 as Weight) + // Standard Error: 897_000 + .saturating_add((213_100_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 45_000 + .saturating_add((31_123_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(208 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: VoterList ListBags (r:200 w:0) - // Storage: VoterList ListNodes (r:1500 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) - /// The range of component `v` is `[500, 1000]`. - /// The range of component `n` is `[500, 1000]`. - /// The range of component `s` is `[1, 20]`. fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - // Minimum execution time: 25_323_129 nanoseconds. 
- Weight::from_ref_time(25_471_672_000 as u64) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_665_504 as u64).saturating_mul(v as u64)) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_956_606 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(202 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + (0 as Weight) + // Standard Error: 116_000 + .saturating_add((23_745_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 116_000 + .saturating_add((22_497_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_968_000 + .saturating_add((20_676_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(202 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } - // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) - /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - // Minimum execution time: 4_905_036 nanoseconds. 
- Weight::from_ref_time(78_163_554 as u64) - // Standard Error: 23_723 - .saturating_add(Weight::from_ref_time(9_784_870 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + (0 as Weight) + // Standard Error: 36_000 + .saturating_add((8_097_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -448,9 +426,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - // Minimum execution time: 10_096 nanoseconds. - Weight::from_ref_time(10_538_000 as u64) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (7_041_000 as Weight) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -459,9 +436,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - // Minimum execution time: 9_045 nanoseconds. 
- Weight::from_ref_time(9_379_000 as u64) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (6_495_000 as Weight) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -470,22 +446,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - // Minimum execution time: 81_457 nanoseconds. - Weight::from_ref_time(82_410_000 as u64) - .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + (62_014_000 as Weight) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - // Minimum execution time: 19_684 nanoseconds. - Weight::from_ref_time(20_059_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (12_814_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } @@ -494,24 +468,23 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - // Minimum execution time: 53_097 nanoseconds. 
- Weight::from_ref_time(53_708_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (43_992_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - // Minimum execution time: 92_199 nanoseconds. - Weight::from_ref_time(93_541_000 as u64) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().writes(7 as u64)) + (75_827_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(7 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -519,27 +492,24 @@ impl WeightInfo for () { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) + // Storage: BagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListBags (r:2 w:2) + // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - // Minimum execution time: 98_227 nanoseconds. - Weight::from_ref_time(99_070_000 as u64) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().writes(8 as u64)) + (81_075_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(12 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `s` is `[0, 100]`. 
fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 45_058 nanoseconds. - Weight::from_ref_time(46_592_713 as u64) - // Standard Error: 413 - .saturating_add(Weight::from_ref_time(63_036 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (35_763_000 as Weight) + // Standard Error: 0 + .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -548,18 +518,16 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) - /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - // Minimum execution time: 86_087 nanoseconds. 
- Weight::from_ref_time(87_627_894 as u64) - .saturating_add(RocksDbWeight::get().reads(13 as u64)) - .saturating_add(RocksDbWeight::get().writes(11 as u64)) + (66_938_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(13 as Weight)) + .saturating_add(RocksDbWeight::get().writes(11 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -568,27 +536,24 @@ impl WeightInfo for () { // Storage: Staking MaxValidatorsCount (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListNodes (r:1 w:1) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - // Minimum execution time: 67_690 nanoseconds. - Weight::from_ref_time(68_348_000 as u64) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + (52_943_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) - /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - // Minimum execution time: 43_512 nanoseconds. 
- Weight::from_ref_time(47_300_477 as u64) - // Standard Error: 11_609 - .saturating_add(Weight::from_ref_time(6_770_405 as u64).saturating_mul(k as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(k as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) + (23_264_000 as Weight) + // Standard Error: 11_000 + .saturating_add((8_006_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -597,170 +562,168 @@ impl WeightInfo for () { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 74_296 nanoseconds. 
- Weight::from_ref_time(73_201_782 as u64) - // Standard Error: 5_007 - .saturating_add(Weight::from_ref_time(2_810_370 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (56_596_000 as Weight) + // Standard Error: 14_000 + .saturating_add((3_644_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(12 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 66_605 nanoseconds. - Weight::from_ref_time(67_279_000 as u64) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (51_117_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - // Minimum execution time: 18_897 nanoseconds. 
- Weight::from_ref_time(19_357_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (11_223_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - // Minimum execution time: 26_509 nanoseconds. - Weight::from_ref_time(26_961_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (19_826_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - // Minimum execution time: 5_025 nanoseconds. - Weight::from_ref_time(5_240_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_789_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - // Minimum execution time: 5_107 nanoseconds. - Weight::from_ref_time(5_320_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_793_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - // Minimum execution time: 5_094 nanoseconds. - Weight::from_ref_time(5_377_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_802_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - // Minimum execution time: 5_219 nanoseconds. 
- Weight::from_ref_time(5_434_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (3_762_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking Invulnerables (r:0 w:1) - /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - // Minimum execution time: 5_122 nanoseconds. - Weight::from_ref_time(5_977_533 as u64) - // Standard Error: 34 - .saturating_add(Weight::from_ref_time(10_205 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (4_318_000 as Weight) + // Standard Error: 0 + .saturating_add((10_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) - /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - // Minimum execution time: 80_216 nanoseconds. 
- Weight::from_ref_time(86_090_609 as u64) - // Standard Error: 2_006 - .saturating_add(Weight::from_ref_time(1_039_308 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + (65_265_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_029_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) - /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - // Minimum execution time: 92_034 nanoseconds. - Weight::from_ref_time(896_585_370 as u64) - // Standard Error: 58_231 - .saturating_add(Weight::from_ref_time(4_908_277 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (903_312_000 as Weight) + // Standard Error: 56_000 + .saturating_add((4_968_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:1 w:0) - // Storage: System Account (r:1 w:1) - /// The range of component `n` is `[0, 256]`. 
+ // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Minimum execution time: 127_936 nanoseconds. - Weight::from_ref_time(184_556_084 as u64) - // Standard Error: 26_981 - .saturating_add(Weight::from_ref_time(21_786_304 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(9 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) + (87_569_000 as Weight) + // Standard Error: 14_000 + .saturating_add((24_232_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:2 w:2) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:1 w:0) - // Storage: System Account (r:1 w:1) - // Storage: Balances Locks (r:1 w:1) - /// The range of component `n` is `[0, 256]`. + // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - // Minimum execution time: 157_778 nanoseconds. 
- Weight::from_ref_time(223_306_359 as u64) - // Standard Error: 27_216 - .saturating_add(Weight::from_ref_time(30_612_663 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(10 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(n as u64))) + (98_839_000 as Weight) + // Standard Error: 21_000 + .saturating_add((34_480_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterList ListNodes (r:3 w:3) + // Storage: BagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterList ListBags (r:2 w:2) - /// The range of component `l` is `[1, 32]`. + // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - // Minimum execution time: 92_880 nanoseconds. 
- Weight::from_ref_time(94_434_663 as u64) - // Standard Error: 1_734 - .saturating_add(Weight::from_ref_time(34_453 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(9 as u64)) - .saturating_add(RocksDbWeight::get().writes(8 as u64)) + (74_865_000 as Weight) + // Standard Error: 3_000 + .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(9 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:1) + // Storage: Staking ErasStakersClipped (r:0 w:2) + // Storage: Staking ErasValidatorPrefs (r:0 w:2) + // Storage: Staking ErasValidatorReward (r:0 w:1) + // Storage: Staking ErasRewardPoints (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:2) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + fn set_history_depth(e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 62_000 + .saturating_add((22_829_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -769,26 +732,24 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) - /// The range of component `s` is `[1, 
100]`. fn reap_stash(s: u32, ) -> Weight { - // Minimum execution time: 92_334 nanoseconds. - Weight::from_ref_time(95_207_614 as u64) - // Standard Error: 1_822 - .saturating_add(Weight::from_ref_time(1_036_787 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) - } - // Storage: VoterList CounterForListNodes (r:1 w:0) + (70_933_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_021_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(12 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: VoterList ListBags (r:200 w:0) - // Storage: VoterList ListNodes (r:101 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking Bonded (r:101 w:0) @@ -797,59 +758,52 @@ impl WeightInfo for () { // Storage: Staking ValidatorCount (r:1 w:0) // Storage: Staking MinimumValidatorCount (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:1) + // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasStakersClipped (r:0 w:1) // Storage: Staking ErasValidatorPrefs (r:0 w:1) // Storage: Staking ErasStakers (r:0 w:1) // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// The range of component `v` is `[1, 10]`. - /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 535_169 nanoseconds. 
- Weight::from_ref_time(548_667_000 as u64) - // Standard Error: 1_759_252 - .saturating_add(Weight::from_ref_time(58_283_319 as u64).saturating_mul(v as u64)) - // Standard Error: 175_299 - .saturating_add(Weight::from_ref_time(13_578_512 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(207 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) - } - // Storage: VoterList CounterForListNodes (r:1 w:0) + (0 as Weight) + // Standard Error: 897_000 + .saturating_add((213_100_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 45_000 + .saturating_add((31_123_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(208 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: VoterList ListBags (r:200 w:0) - // Storage: VoterList ListNodes (r:1500 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) - /// The range of component `v` is `[500, 1000]`. - /// The range of component `n` is `[500, 1000]`. - /// The range of component `s` is `[1, 20]`. 
fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - // Minimum execution time: 25_323_129 nanoseconds. - Weight::from_ref_time(25_471_672_000 as u64) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_665_504 as u64).saturating_mul(v as u64)) - // Standard Error: 266_391 - .saturating_add(Weight::from_ref_time(6_956_606 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(202 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + (0 as Weight) + // Standard Error: 116_000 + .saturating_add((23_745_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 116_000 + .saturating_add((22_497_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_968_000 + .saturating_add((20_676_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(202 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } - // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) - /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - // Minimum execution time: 4_905_036 nanoseconds. 
- Weight::from_ref_time(78_163_554 as u64) - // Standard Error: 23_723 - .saturating_add(Weight::from_ref_time(9_784_870 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + (0 as Weight) + // Standard Error: 36_000 + .saturating_add((8_097_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -858,9 +812,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - // Minimum execution time: 10_096 nanoseconds. - Weight::from_ref_time(10_538_000 as u64) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (7_041_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -869,9 +822,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - // Minimum execution time: 9_045 nanoseconds. 
- Weight::from_ref_time(9_379_000 as u64) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (6_495_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -880,21 +832,19 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: VoterList ListNodes (r:2 w:2) - // Storage: VoterList ListBags (r:1 w:1) - // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - // Minimum execution time: 81_457 nanoseconds. - Weight::from_ref_time(82_410_000 as u64) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + (62_014_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - // Minimum execution time: 19_684 nanoseconds. 
- Weight::from_ref_time(20_059_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (12_814_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index b803aad69263f..5a93701141da9 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -30,7 +30,7 @@ sp-std = { default-features = false, path = "../../primitives/std" } substrate-state-trie-migration-rpc = { optional = true, path = "../../utils/frame/rpc/state-trie-migration-rpc" } [dev-dependencies] -parking_lot = "0.12.1" +parking_lot = "0.12.0" tokio = { version = "1.17.0", features = ["macros"] } pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } @@ -39,7 +39,7 @@ sp-tracing = { path = "../../primitives/tracing" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 5255d4f6f3800..94f6c1f223b9c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -235,10 +235,7 @@ pub mod pallet { /// reading a key, we simply cannot know how many bytes it is. In other words, this should /// not be used in any environment where resources are strictly bounded (e.g. a parachain), /// but it is acceptable otherwise (relay chain, offchain workers). 
- pub fn migrate_until_exhaustion( - &mut self, - limits: MigrationLimits, - ) -> Result<(), Error> { + pub fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) -> DispatchResult { log!(debug, "running migrations on top of {:?} until {:?}", self, limits); if limits.item.is_zero() || limits.size.is_zero() { @@ -265,7 +262,7 @@ pub mod pallet { /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// /// This function is *the* core of this entire pallet. - fn migrate_tick(&mut self) -> Result<(), Error> { + fn migrate_tick(&mut self) -> DispatchResult { match (&self.progress_top, &self.progress_child) { (Progress::ToStart, _) => self.migrate_top(), (Progress::LastKey(_), Progress::LastKey(_)) => { @@ -304,7 +301,7 @@ pub mod pallet { /// Migrate the current child key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. - fn migrate_child(&mut self) -> Result<(), Error> { + fn migrate_child(&mut self) -> DispatchResult { use sp_io::default_child_storage as child_io; let (maybe_current_child, child_root) = match (&self.progress_child, &self.progress_top) { @@ -353,7 +350,7 @@ pub mod pallet { /// Migrate the current top key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. - fn migrate_top(&mut self) -> Result<(), Error> { + fn migrate_top(&mut self) -> DispatchResult { let maybe_current_top = match &self.progress_top { Progress::LastKey(last_top) => { let maybe_top: Option> = @@ -434,7 +431,7 @@ pub mod pallet { /// The auto migration task finished. AutoMigrationFinished, /// Migration got halted due to an error or miss-configuration. - Halted { error: Error }, + Halted, } /// The outer Pallet struct. @@ -446,13 +443,13 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// Origin that can control the configurations of this pallet. 
- type ControlOrigin: frame_support::traits::EnsureOrigin; + type ControlOrigin: frame_support::traits::EnsureOrigin; /// Filter on which origin that trigger the manual migrations. - type SignedFilter: EnsureOrigin; + type SignedFilter: EnsureOrigin; /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The currency provider type. type Currency: Currency; @@ -519,9 +516,8 @@ pub mod pallet { pub type SignedMigrationMaxLimits = StorageValue<_, MigrationLimits, OptionQuery>; #[pallet::error] - #[derive(Clone, PartialEq)] pub enum Error { - /// Max signed limits not respected. + /// max signed limits not respected. MaxSignedLimits, /// A key was longer than the configured maximum. /// @@ -533,12 +529,12 @@ pub mod pallet { KeyTooLong, /// submitter does not have enough funds. NotEnoughFunds, - /// Bad witness data provided. + /// bad witness data provided. BadWitness, + /// upper bound of size is exceeded, + SizeUpperBoundExceeded, /// Signed migration is not allowed because the maximum limit is not set yet. SignedMigrationNotAllowed, - /// Bad child root provided. - BadChildRoot, } #[pallet::call] @@ -621,7 +617,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Ok(().into()) + return Err(Error::::SizeUpperBoundExceeded.into()) } Self::deposit_event(Event::::Migrated { @@ -638,10 +634,13 @@ pub mod pallet { MigrationProcess::::put(task); let post_info = PostDispatchInfo { actual_weight, pays_fee: Pays::No }; - if let Err(error) = migration { - Self::halt(error); + match migration { + Ok(_) => Ok(post_info), + Err(error) => { + Self::halt(&error); + Err(DispatchErrorWithPostInfo { post_info, error }) + }, } - Ok(post_info) } /// Migrate the list of top keys by iterating each of them one by one. 
@@ -680,7 +679,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - Ok(().into()) + Err("wrong witness data".into()) } else { Self::deposit_event(Event::::Migrated { top: keys.len() as u32, @@ -742,9 +741,12 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); Self::deposit_event(Event::::Slashed { who, amount: deposit }); - Ok(PostDispatchInfo { - actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), - pays_fee: Pays::Yes, + Err(DispatchErrorWithPostInfo { + error: "bad witness".into(), + post_info: PostDispatchInfo { + actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), + pays_fee: Pays::Yes, + }, }) } else { Self::deposit_event(Event::::Migrated { @@ -804,7 +806,7 @@ pub mod pallet { if let Some(limits) = Self::auto_limits() { let mut task = Self::migration_process(); if let Err(e) = task.migrate_until_exhaustion(limits) { - Self::halt(e); + Self::halt(&e); } let weight = Self::dynamic_weight(task.dyn_total_items(), task.dyn_size); @@ -839,19 +841,18 @@ pub mod pallet { impl Pallet { /// The real weight of a migration of the given number of `items` with total `size`. fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight { - let items = items as u64; - ::DbWeight::get() - .reads_writes(1, 1) - .saturating_mul(items) + let items = items as Weight; + items + .saturating_mul(::DbWeight::get().reads_writes(1, 1)) // we assume that the read/write per-byte weight is the same for child and top tree. .saturating_add(T::WeightInfo::process_top_key(size)) } /// Put a stop to all ongoing migrations and logs an error. 
- fn halt(error: Error) { - log!(error, "migration halted due to: {:?}", error); + fn halt(msg: &E) { + log!(error, "migration halted due to: {:?}", msg); AutoLimits::::kill(); - Self::deposit_event(Event::::Halted { error }); + Self::deposit_event(Event::::Halted); } /// Convert a child root key, aka. "Child-bearing top key" into the proper format. @@ -870,7 +871,7 @@ pub mod pallet { fn transform_child_key_or_halt(root: &Vec) -> &[u8] { let key = Self::transform_child_key(root); if key.is_none() { - Self::halt(Error::::BadChildRoot); + Self::halt("bad child root key"); } key.unwrap_or_default() } @@ -960,16 +961,8 @@ mod benchmarks { frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_ok() - ); - - frame_system::Pallet::::assert_last_event( - ::RuntimeEvent::from(crate::Event::Slashed { - who: caller.clone(), - amount: T::SignedDepositBase::get() - .saturating_add(T::SignedDepositPerItem::get().saturating_mul(1u32.into())), - }).into(), - ); + ).is_err() + ) } verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); @@ -1012,7 +1005,7 @@ mod benchmarks { StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ).is_ok() + ).is_err() ) } verify { @@ -1084,8 +1077,8 @@ mod mock { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u32; type Hash = H256; @@ -1093,7 +1086,7 @@ mod mock { type AccountId = u64; type Lookup = IdentityLookup; type Header = sp_runtime::generic::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU32<250>; type DbWeight = (); type Version = (); @@ -1115,7 +1108,7 @@ mod mock { impl pallet_balances::Config for Test { type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type 
ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -1130,30 +1123,30 @@ mod mock { impl WeightInfo for StateMigrationTestWeight { fn process_top_key(_: u32) -> Weight { - Weight::from_ref_time(1000000) + 1000000 } fn continue_migrate() -> Weight { - Weight::from_ref_time(1000000) + 1000000 } fn continue_migrate_wrong_witness() -> Weight { - Weight::from_ref_time(1000000) + 1000000 } fn migrate_custom_top_fail() -> Weight { - Weight::from_ref_time(1000000) + 1000000 } fn migrate_custom_top_success() -> Weight { - Weight::from_ref_time(1000000) + 1000000 } fn migrate_custom_child_fail() -> Weight { - Weight::from_ref_time(1000000) + 1000000 } fn migrate_custom_child_success() -> Weight { - Weight::from_ref_time(1000000) + 1000000 } } impl pallet_state_trie_migration::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ControlOrigin = EnsureRoot; type Currency = Balances; type MaxKeyLen = MigrationMaxKeyLen; @@ -1244,9 +1237,9 @@ mod mock { (custom_storage, version).into() } - pub(crate) fn run_to_block(n: u32) -> (H256, Weight) { + pub(crate) fn run_to_block(n: u32) -> (H256, u64) { let mut root = Default::default(); - let mut weight_sum = Weight::zero(); + let mut weight_sum = 0; log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1); @@ -1254,7 +1247,7 @@ mod mock { weight_sum += StateTrieMigration::on_initialize(System::block_number()); - root = *System::finalize().state_root(); + root = System::finalize().state_root().clone(); System::on_finalize(System::block_number()); } (root, weight_sum) @@ -1292,16 +1285,18 @@ mod test { SignedMigrationMaxLimits::::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. 
- frame_support::assert_ok!(StateTrieMigration::continue_migrate( - RuntimeOrigin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::::get() - ),); - // The auto migration halted. - System::assert_last_event( - crate::Event::Halted { error: Error::::KeyTooLong }.into(), + frame_support::assert_err_with_weight!( + StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::::get() + ), + Error::::KeyTooLong, + Some(2000000), ); + // The auto migration halted. + System::assert_last_event(crate::Event::Halted {}.into()); // Limits are killed. assert!(AutoLimits::::get().is_none()); @@ -1327,16 +1322,18 @@ mod test { SignedMigrationMaxLimits::::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_ok!(StateTrieMigration::continue_migrate( - RuntimeOrigin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::::get() - )); - // The auto migration halted. - System::assert_last_event( - crate::Event::Halted { error: Error::::KeyTooLong }.into(), + frame_support::assert_err_with_weight!( + StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::::get() + ), + Error::::KeyTooLong, + Some(2000000), ); + // The auto migration halted. + System::assert_last_event(crate::Event::Halted {}.into()); // Limits are killed. assert!(AutoLimits::::get().is_none()); @@ -1457,7 +1454,7 @@ mod test { // can't submit if limit is too high. frame_support::assert_err!( StateTrieMigration::continue_migrate( - RuntimeOrigin::signed(1), + Origin::signed(1), MigrationLimits { item: 5, size: sp_runtime::traits::Bounded::max_value() }, Bounded::max_value(), MigrationProcess::::get() @@ -1468,7 +1465,7 @@ mod test { // can't submit if poor. 
frame_support::assert_err!( StateTrieMigration::continue_migrate( - RuntimeOrigin::signed(2), + Origin::signed(2), MigrationLimits { item: 5, size: 100 }, 100, MigrationProcess::::get() @@ -1479,7 +1476,7 @@ mod test { // can't submit with bad witness. frame_support::assert_err_ignore_postinfo!( StateTrieMigration::continue_migrate( - RuntimeOrigin::signed(1), + Origin::signed(1), MigrationLimits { item: 5, size: 100 }, 100, MigrationTask { @@ -1487,7 +1484,7 @@ mod test { ..Default::default() } ), - Error::::BadWitness, + Error::::BadWitness ); // migrate all keys in a series of submissions @@ -1500,7 +1497,7 @@ mod test { assert!(result.is_ok()); frame_support::assert_ok!(StateTrieMigration::continue_migrate( - RuntimeOrigin::signed(1), + Origin::signed(1), StateTrieMigration::signed_migration_max_limits().unwrap(), task.dyn_size, MigrationProcess::::get() @@ -1523,7 +1520,7 @@ mod test { let correct_witness = 3 + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD * 3 + 1 + 2 + 3; new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( - RuntimeOrigin::signed(1), + Origin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], correct_witness, )); @@ -1536,7 +1533,7 @@ mod test { new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { // works if the witness is an overestimate frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( - RuntimeOrigin::signed(1), + Origin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], correct_witness + 99, )); @@ -1550,11 +1547,14 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. 
- frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( - RuntimeOrigin::signed(1), - vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - correct_witness - 1, - ),); + frame_support::assert_err!( + StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + correct_witness - 1, + ), + "wrong witness data" + ); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0); @@ -1569,7 +1569,7 @@ mod test { fn custom_migrate_child_works() { new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( - RuntimeOrigin::signed(1), + Origin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 55 + 66, @@ -1584,12 +1584,13 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. - frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( - RuntimeOrigin::signed(1), + assert!(StateTrieMigration::migrate_custom_child( + Origin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 999999, // wrong witness - )); + ) + .is_err()); // no funds should remain reserved. 
assert_eq!(Balances::reserved_balance(&1), 0); @@ -1607,10 +1608,7 @@ pub(crate) mod remote_tests { use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET}; use codec::Encode; use frame_benchmarking::Zero; - use frame_support::{ - traits::{Get, Hooks}, - weights::Weight, - }; + use frame_support::traits::{Get, Hooks}; use frame_system::Pallet as System; use remote_externalities::Mode; use sp_core::H256; @@ -1620,9 +1618,9 @@ pub(crate) mod remote_tests { #[allow(dead_code)] fn run_to_block>( n: ::BlockNumber, - ) -> (H256, Weight) { + ) -> (H256, u64) { let mut root = Default::default(); - let mut weight_sum = Weight::zero(); + let mut weight_sum = 0; while System::::block_number() < n { System::::set_block_number(System::::block_number() + One::one()); System::::on_initialize(System::::block_number()); @@ -1640,12 +1638,13 @@ pub(crate) mod remote_tests { /// /// This will print some very useful statistics, make sure [`crate::LOG_TARGET`] is enabled. #[allow(dead_code)] - pub(crate) async fn run_with_limits(limits: MigrationLimits, mode: Mode) - where + pub(crate) async fn run_with_limits< Runtime: crate::Config, - Block: BlockT, - Block::Header: serde::de::DeserializeOwned, - { + Block: BlockT + serde::de::DeserializeOwned, + >( + limits: MigrationLimits, + mode: Mode, + ) { let mut ext = remote_externalities::Builder::::new() .mode(mode) .state_version(sp_core::storage::StateVersion::V0) diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index 7414bb9038fdd..f2566f949c058 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_state_trie_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/state-trie-migration/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/state-trie-migration/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,48 +59,40 @@ impl WeightInfo for SubstrateWeight { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - // Minimum execution time: 23_874 nanoseconds. - Weight::from_ref_time(24_127_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (19_019_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - // Minimum execution time: 6_119 nanoseconds. 
- Weight::from_ref_time(6_325_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + (1_874_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn migrate_custom_top_success() -> Weight { - // Minimum execution time: 20_365 nanoseconds. - Weight::from_ref_time(20_790_000 as u64) + (16_381_000 as Weight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - // Minimum execution time: 38_979 nanoseconds. - Weight::from_ref_time(40_271_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (25_966_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn migrate_custom_child_success() -> Weight { - // Minimum execution time: 21_217 nanoseconds. - Weight::from_ref_time(21_526_000 as u64) + (16_712_000 as Weight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - // Minimum execution time: 43_853 nanoseconds. - Weight::from_ref_time(44_693_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (29_885_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: unknown [0x6b6579] (r:1 w:1) - /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { - // Minimum execution time: 5_575 nanoseconds. 
- Weight::from_ref_time(5_719_000 as u64) - // Standard Error: 3 - .saturating_add(Weight::from_ref_time(1_404 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (0 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } @@ -112,47 +101,39 @@ impl WeightInfo for () { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - // Minimum execution time: 23_874 nanoseconds. - Weight::from_ref_time(24_127_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (19_019_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - // Minimum execution time: 6_119 nanoseconds. - Weight::from_ref_time(6_325_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + (1_874_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } fn migrate_custom_top_success() -> Weight { - // Minimum execution time: 20_365 nanoseconds. - Weight::from_ref_time(20_790_000 as u64) + (16_381_000 as Weight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - // Minimum execution time: 38_979 nanoseconds. 
- Weight::from_ref_time(40_271_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (25_966_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn migrate_custom_child_success() -> Weight { - // Minimum execution time: 21_217 nanoseconds. - Weight::from_ref_time(21_526_000 as u64) + (16_712_000 as Weight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - // Minimum execution time: 43_853 nanoseconds. - Weight::from_ref_time(44_693_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (29_885_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: unknown [0x6b6579] (r:1 w:1) - /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { - // Minimum execution time: 5_575 nanoseconds. - Weight::from_ref_time(5_719_000 as u64) - // Standard Error: 3 - .saturating_add(Weight::from_ref_time(1_404 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (0 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/sudo/README.md b/frame/sudo/README.md index 7342832d2d7a7..e8f688091e326 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -72,6 +72,6 @@ You need to set an initial superuser account as the sudo `key`. 
[`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html -[`Origin`]: https://docs.substrate.io/main-docs/build/origins/ +[`Origin`]: https://docs.substrate.io/v3/runtime/origins License: Apache-2.0 diff --git a/frame/sudo/src/extension.rs b/frame/sudo/src/extension.rs deleted file mode 100644 index 068fa2ed928d5..0000000000000 --- a/frame/sudo/src/extension.rs +++ /dev/null @@ -1,107 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{Config, Pallet}; -use codec::{Decode, Encode}; -use frame_support::{dispatch::DispatchInfo, ensure}; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, - UnknownTransaction, ValidTransaction, - }, -}; -use sp_std::{fmt, marker::PhantomData}; - -/// Ensure that signed transactions are only valid if they are signed by sudo account. -/// -/// In the initial phase of a chain without any tokens you can not prevent accounts from sending -/// transactions. -/// These transactions would enter the transaction pool as the succeed the validation, but would -/// fail on applying them as they are not allowed/disabled/whatever. This would be some huge dos -/// vector to any kind of chain. 
This extension solves the dos vector by preventing any kind of -/// transaction entering the pool as long as it is not signed by the sudo account. -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct CheckOnlySudoAccount(PhantomData); - -impl Default for CheckOnlySudoAccount { - fn default() -> Self { - Self(Default::default()) - } -} - -impl fmt::Debug for CheckOnlySudoAccount { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "CheckOnlySudoAccount") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - Ok(()) - } -} - -impl CheckOnlySudoAccount { - /// Creates new `SignedExtension` to check sudo key. - pub fn new() -> Self { - Self::default() - } -} - -impl SignedExtension for CheckOnlySudoAccount -where - ::RuntimeCall: Dispatchable, -{ - const IDENTIFIER: &'static str = "CheckOnlySudoAccount"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn validate( - &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let sudo_key: T::AccountId = >::key().ok_or(UnknownTransaction::CannotLookup)?; - ensure!(*who == sudo_key, InvalidTransaction::BadSigner); - - Ok(ValidTransaction { - priority: info.weight.ref_time() as TransactionPriority, - ..Default::default() - }) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } -} diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index c18ced8911193..d9e72b37f2970 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -59,7 +59,7 @@ //! use frame_system::pallet_prelude::*; //! //! #[pallet::pallet] -//! pub struct Pallet(PhantomData); +//! 
pub struct Pallet(_); //! //! #[pallet::config] //! pub trait Config: frame_system::Config {} @@ -79,13 +79,6 @@ //! # fn main() {} //! ``` //! -//! ### Signed Extension -//! -//! The Sudo pallet defines the following extension: -//! -//! - [`CheckOnlySudoAccount`]: Ensures that the signed transactions are only valid if they are -//! signed by sudo account. -//! //! ## Genesis Config //! //! The Sudo pallet depends on the [`GenesisConfig`]. @@ -95,26 +88,22 @@ //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Origin`]: https://docs.substrate.io/main-docs/build/origins/ +//! [`Origin`]: https://docs.substrate.io/v3/runtime/origins #![cfg_attr(not(feature = "std"), no_std)] use sp_runtime::{traits::StaticLookup, DispatchResult}; use sp_std::prelude::*; -use frame_support::{dispatch::GetDispatchInfo, traits::UnfilteredDispatchable}; +use frame_support::{traits::UnfilteredDispatchable, weights::GetDispatchInfo}; -mod extension; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -pub use extension::CheckOnlySudoAccount; pub use pallet::*; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::{DispatchResult, *}; @@ -124,12 +113,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// A sudo-able call. 
- type RuntimeCall: Parameter - + UnfilteredDispatchable - + GetDispatchInfo; + type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; } #[pallet::pallet] @@ -150,11 +137,11 @@ pub mod pallet { /// # #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); - (dispatch_info.weight, dispatch_info.class) + (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) })] pub fn sudo( origin: OriginFor, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -179,7 +166,7 @@ pub mod pallet { #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn sudo_unchecked_weight( origin: OriginFor, - call: Box<::RuntimeCall>, + call: Box<::Call>, _weight: Weight, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. @@ -205,7 +192,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn set_key( origin: OriginFor, - new: AccountIdLookupOf, + new: ::Source, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -233,6 +220,7 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( dispatch_info.weight + .saturating_add(10_000) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, @@ -240,8 +228,8 @@ pub mod pallet { })] pub fn sudo_as( origin: OriginFor, - who: AccountIdLookupOf, - call: Box<::RuntimeCall>, + who: ::Source, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. 
let sender = ensure_signed(origin)?; diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index db2ad4d563910..2e2a4abafcd98 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -22,7 +22,6 @@ use crate as sudo; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64, Contains, GenesisBuild}, - weights::Weight, }; use frame_system::limits; use sp_core::H256; @@ -40,11 +39,12 @@ pub mod logger { #[pallet::config] pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::without_storage_info] pub struct Pallet(PhantomData); #[pallet::call] @@ -57,7 +57,7 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is `Root`. ensure_root(origin)?; - >::try_append(i).map_err(|_| "could not append")?; + >::append(i); Self::deposit_event(Event::AppendI32 { value: i, weight }); Ok(().into()) } @@ -70,8 +70,8 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is some signed account. let sender = ensure_signed(origin)?; - >::try_append(i).map_err(|_| "could not append")?; - >::try_append(sender.clone()).map_err(|_| "could not append")?; + >::append(i); + >::append(sender.clone()); Self::deposit_event(Event::AppendI32AndAccount { sender, value: i, weight }); Ok(().into()) } @@ -86,12 +86,11 @@ pub mod logger { #[pallet::storage] #[pallet::getter(fn account_log)] - pub(super) type AccountLog = - StorageValue<_, BoundedVec>, ValueQuery>; + pub(super) type AccountLog = StorageValue<_, Vec, ValueQuery>; #[pallet::storage] #[pallet::getter(fn i32_log)] - pub(super) type I32Log = StorageValue<_, BoundedVec>, ValueQuery>; + pub(super) type I32Log = StorageValue<_, Vec, ValueQuery>; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -110,12 +109,12 @@ frame_support::construct_runtime!( ); parameter_types! 
{ - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(1024); } pub struct BlockEverything; -impl Contains for BlockEverything { - fn contains(_: &RuntimeCall) -> bool { +impl Contains for BlockEverything { + fn contains(_: &Call) -> bool { false } } @@ -125,8 +124,8 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -134,7 +133,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -149,13 +148,13 @@ impl frame_system::Config for Test { // Implement the logger module's `Config` on the Test runtime. impl logger::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } // Implement the sudo module's `Config` on the Test runtime. impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; } // New types for dispatchable functions. diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index ae8f198736004..84c8e0c5c254e 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -18,10 +18,10 @@ //! Tests for the module. 
use super::*; -use frame_support::{assert_noop, assert_ok, weights::Weight}; +use frame_support::{assert_noop, assert_ok}; use mock::{ - new_test_ext, Logger, LoggerCall, RuntimeCall, RuntimeEvent as TestEvent, RuntimeOrigin, Sudo, - SudoCall, System, Test, + new_test_ext, Call, Event as TestEvent, Logger, LoggerCall, Origin, Sudo, SudoCall, System, + Test, }; #[test] @@ -39,19 +39,13 @@ fn sudo_basics() { // Configure a default test environment and set the root `key` to 1. new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as `origin`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1_000), - })); - assert_ok!(Sudo::sudo(RuntimeOrigin::signed(1), call)); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + assert_ok!(Sudo::sudo(Origin::signed(1), call)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1_000), - })); - assert_noop!(Sudo::sudo(RuntimeOrigin::signed(2), call), Error::::RequireSudo); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::::RequireSudo); }); } @@ -62,11 +56,8 @@ fn sudo_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. 
- let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1), - })); - assert_ok!(Sudo::sudo(RuntimeOrigin::signed(1), call)); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); + assert_ok!(Sudo::sudo(Origin::signed(1), call)); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) } @@ -75,42 +66,24 @@ fn sudo_emits_events_correctly() { fn sudo_unchecked_weight_basics() { new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as origin. - let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1_000), - })); - assert_ok!(Sudo::sudo_unchecked_weight( - RuntimeOrigin::signed(1), - call, - Weight::from_ref_time(1_000) - )); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when called with a non-root `key`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1_000), - })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); assert_noop!( - Sudo::sudo_unchecked_weight( - RuntimeOrigin::signed(2), - call, - Weight::from_ref_time(1_000) - ), + Sudo::sudo_unchecked_weight(Origin::signed(2), call, 1_000), Error::::RequireSudo, ); // `I32Log` is unchanged after unsuccessful call. assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. 
- let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1), - })); - let sudo_unchecked_weight_call = - SudoCall::sudo_unchecked_weight { call, weight: Weight::from_ref_time(1_000) }; + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); + let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: 1_000 }; let info = sudo_unchecked_weight_call.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1_000)); + assert_eq!(info.weight, 1_000); }); } @@ -121,15 +94,8 @@ fn sudo_unchecked_weight_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1), - })); - assert_ok!(Sudo::sudo_unchecked_weight( - RuntimeOrigin::signed(1), - call, - Weight::from_ref_time(1_000) - )); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); + assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) } @@ -138,14 +104,14 @@ fn sudo_unchecked_weight_emits_events_correctly() { fn set_key_basics() { new_test_ext(1).execute_with(|| { // A root `key` can change the root `key` - assert_ok!(Sudo::set_key(RuntimeOrigin::signed(1), 2)); + assert_ok!(Sudo::set_key(Origin::signed(1), 2)); assert_eq!(Sudo::key(), Some(2u64)); }); new_test_ext(1).execute_with(|| { // A non-root `key` will trigger a `RequireSudo` error and a non-root `key` cannot change // the root `key`. 
- assert_noop!(Sudo::set_key(RuntimeOrigin::signed(2), 3), Error::::RequireSudo); + assert_noop!(Sudo::set_key(Origin::signed(2), 3), Error::::RequireSudo); }); } @@ -156,10 +122,10 @@ fn set_key_emits_events_correctly() { System::set_block_number(1); // A root `key` can change the root `key`. - assert_ok!(Sudo::set_key(RuntimeOrigin::signed(1), 2)); + assert_ok!(Sudo::set_key(Origin::signed(1), 2)); System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(1) })); // Double check. - assert_ok!(Sudo::set_key(RuntimeOrigin::signed(2), 4)); + assert_ok!(Sudo::set_key(Origin::signed(2), 4)); System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(2) })); }); } @@ -168,27 +134,18 @@ fn set_key_emits_events_correctly() { fn sudo_as_basics() { new_test_ext(1).execute_with(|| { // A privileged function will not work when passed to `sudo_as`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { - i: 42, - weight: Weight::from_ref_time(1_000), - })); - assert_ok!(Sudo::sudo_as(RuntimeOrigin::signed(1), 2, call)); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { - i: 42, - weight: Weight::from_ref_time(1), - })); - assert_noop!(Sudo::sudo_as(RuntimeOrigin::signed(3), 2, call), Error::::RequireSudo); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. 
- let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { - i: 42, - weight: Weight::from_ref_time(1), - })); - assert_ok!(Sudo::sudo_as(RuntimeOrigin::signed(1), 2, call)); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); // The correct user makes the call within `sudo_as`. assert_eq!(Logger::account_log(), vec![2]); @@ -202,11 +159,8 @@ fn sudo_as_emits_events_correctly() { System::set_block_number(1); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { - i: 42, - weight: Weight::from_ref_time(1), - })); - assert_ok!(Sudo::sudo_as(RuntimeOrigin::signed(1), 2, call)); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone { sudo_result: Ok(()) })); }); } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 5af1dc26c1b49..ca26d3a5e32f2 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -17,7 +17,6 @@ serde = { version = "1.0.136", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-metadata = { version = "15.0.0", default-features = false, features = ["v14"] } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = 
"../../primitives/runtime" } @@ -26,34 +25,30 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../primitive sp-arithmetic = { version = "5.0.0", default-features = false, path = "../../primitives/arithmetic" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } tt-call = "1.0.8" frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "./procedural" } paste = "1.0" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.12.0", default-features = false, optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.12.0", optional = true, path = "../../primitives/state-machine" } bitflags = "1.3" impl-trait-for-tuples = "0.2.2" smallvec = "1.8.0" log = { version = "0.4.17", default-features = false } sp-core-hashing-proc-macro = { version = "5.0.0", path = "../../primitives/core/hashing/proc-macro" } -k256 = { version = "0.11.5", default-features = false, features = ["ecdsa"] } +k256 = { version = "0.10.4", default-features = false, features = ["ecdsa"] } [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { version = "4.0.0-dev", path = "../system" } -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } [features] default = ["std"] std = [ - "sp-core/std", - "k256/std", "once_cell", "serde", - "sp-api/std", "sp-io/std", "codec/std", "scale-info/std", @@ -64,8 +59,7 @@ std = [ "frame-metadata/std", "sp-inherents/std", "sp-staking/std", - "sp-state-machine/std", - 
"sp-weights/std", + "sp-state-machine", "frame-support-procedural/std", "log/std", ] @@ -77,7 +71,3 @@ no-metadata-docs = ["frame-support-procedural/no-metadata-docs"] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. full-metadata-docs = ["scale-info/docs"] -# Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of -# pallets in a runtime grows. Does increase the compile time! -tuples-96 = [] -tuples-128 = [] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 06b8056aff982..7ddec39cad9fb 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -16,8 +16,6 @@ proc-macro = true [dependencies] Inflector = "0.11.4" -cfg-expr = "0.10.3" -itertools = "0.10.3" proc-macro2 = "1.0.37" quote = "1.0.10" syn = { version = "1.0.98", features = ["full"] } diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index 8f07448f5785e..801b69035121d 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_dispatch( @@ -31,7 +30,6 @@ pub fn expand_outer_dispatch( let mut variant_patterns = Vec::new(); let mut query_call_part_macros = Vec::new(); let mut pallet_names = Vec::new(); - let mut pallet_attrs = Vec::new(); let system_path = &system_pallet.path; let pallets_with_call = pallet_decls.iter().filter(|decl| decl.exists_part("Call")); @@ -40,24 +38,12 @@ pub fn expand_outer_dispatch( let name = &pallet_declaration.name; let path = &pallet_declaration.path; let index = pallet_declaration.index; - let attr = - 
pallet_declaration.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); - variant_defs.extend(quote! { - #attr - #[codec(index = #index)] - #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ), - }); - variant_patterns.push(quote!(RuntimeCall::#name(call))); + variant_defs.extend( + quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),), + ); + variant_patterns.push(quote!(Call::#name(call))); pallet_names.push(name); - pallet_attrs.push(attr); query_call_part_macros.push(quote! { #path::__substrate_call_check::is_call_part_defined!(#name); }); @@ -73,20 +59,19 @@ pub fn expand_outer_dispatch( #scrate::scale_info::TypeInfo, #scrate::RuntimeDebug, )] - pub enum RuntimeCall { + pub enum Call { #variant_defs } #[cfg(test)] - impl RuntimeCall { + impl Call { /// Return a list of the module names together with their size in memory. 
pub const fn sizes() -> &'static [( &'static str, usize )] { use #scrate::dispatch::Callable; use core::mem::size_of; &[#( - #pallet_attrs ( stringify!(#pallet_names), - size_of::< <#pallet_names as Callable<#runtime>>::RuntimeCall >(), + size_of::< <#pallet_names as Callable<#runtime>>::Call >(), ), )*] } @@ -113,25 +98,18 @@ pub fn expand_outer_dispatch( } } } - impl #scrate::dispatch::GetDispatchInfo for RuntimeCall { + impl #scrate::dispatch::GetDispatchInfo for Call { fn get_dispatch_info(&self) -> #scrate::dispatch::DispatchInfo { match self { - #( - #pallet_attrs - #variant_patterns => call.get_dispatch_info(), - )* + #( #variant_patterns => call.get_dispatch_info(), )* } } } - // Deprecated, but will warn when used - #[allow(deprecated)] - impl #scrate::weights::GetDispatchInfo for RuntimeCall {} - impl #scrate::dispatch::GetCallMetadata for RuntimeCall { + impl #scrate::dispatch::GetCallMetadata for Call { fn get_call_metadata(&self) -> #scrate::dispatch::CallMetadata { use #scrate::dispatch::GetCallName; match self { #( - #pallet_attrs #variant_patterns => { let function_name = call.get_call_name(); let pallet_name = stringify!(#pallet_names); @@ -143,7 +121,6 @@ pub fn expand_outer_dispatch( fn get_module_names() -> &'static [&'static str] { &[#( - #pallet_attrs stringify!(#pallet_names), )*] } @@ -152,22 +129,21 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::{Callable, GetCallName}; match module { #( - #pallet_attrs stringify!(#pallet_names) => - <<#pallet_names as Callable<#runtime>>::RuntimeCall + <<#pallet_names as Callable<#runtime>>::Call as GetCallName>::get_call_names(), )* _ => unreachable!(), } } } - impl #scrate::dispatch::Dispatchable for RuntimeCall { - type RuntimeOrigin = RuntimeOrigin; - type Config = RuntimeCall; - type Info = #scrate::dispatch::DispatchInfo; - type PostInfo = #scrate::dispatch::PostDispatchInfo; - fn dispatch(self, origin: RuntimeOrigin) -> #scrate::dispatch::DispatchResultWithPostInfo { - if 
!::filter_call(&origin, &self) { + impl #scrate::dispatch::Dispatchable for Call { + type Origin = Origin; + type Config = Call; + type Info = #scrate::weights::DispatchInfo; + type PostInfo = #scrate::weights::PostDispatchInfo; + fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { + if !::filter_call(&origin, &self) { return #scrate::sp_std::result::Result::Err( #system_path::Error::<#runtime>::CallFiltered.into() ); @@ -176,12 +152,11 @@ pub fn expand_outer_dispatch( #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) } } - impl #scrate::traits::UnfilteredDispatchable for RuntimeCall { - type RuntimeOrigin = RuntimeOrigin; - fn dispatch_bypass_filter(self, origin: RuntimeOrigin) -> #scrate::dispatch::DispatchResultWithPostInfo { + impl #scrate::traits::UnfilteredDispatchable for Call { + type Origin = Origin; + fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { match self { #( - #pallet_attrs #variant_patterns => #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(call, origin), )* @@ -190,8 +165,7 @@ pub fn expand_outer_dispatch( } #( - #pallet_attrs - impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for RuntimeCall { + impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { #[allow(unreachable_patterns)] fn is_sub_type(&self) -> Option<&#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> { match self { @@ -202,8 +176,7 @@ pub fn expand_outer_dispatch( } } - #pallet_attrs - impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for RuntimeCall { + impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { fn from(call: #scrate::dispatch::CallableCallFor<#pallet_names, #runtime>) -> Self { #variant_patterns } diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs 
b/frame/support/procedural/src/construct_runtime/expand/config.rs index 9b731a5825a3c..a3d70f18529c7 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -19,7 +19,6 @@ use crate::construct_runtime::Pallet; use inflector::Inflector; use proc_macro2::TokenStream; use quote::{format_ident, quote, ToTokens}; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_config( @@ -41,19 +40,11 @@ pub fn expand_outer_config( let field_name = &Ident::new(&pallet_name.to_string().to_snake_case(), decl.name.span()); let part_is_generic = !pallet_entry.generics.params.is_empty(); - let attr = &decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); - types.extend(expand_config_types(attr, runtime, decl, &config, part_is_generic)); - fields.extend(quote!(#attr pub #field_name: #config,)); + types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); + fields.extend(quote!(pub #field_name: #config,)); build_storage_calls - .extend(expand_config_build_storage_call(scrate, attr, runtime, decl, field_name)); + .extend(expand_config_build_storage_call(scrate, runtime, decl, field_name)); query_genesis_config_part_macros.push(quote! { #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); #[cfg(feature = "std")] @@ -97,7 +88,6 @@ pub fn expand_outer_config( } fn expand_config_types( - attr: &TokenStream, runtime: &Ident, decl: &Pallet, config: &Ident, @@ -107,17 +97,14 @@ fn expand_config_types( match (decl.instance.as_ref(), part_is_generic) { (Some(inst), true) => quote! { - #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime, #path::#inst>; }, (None, true) => quote! 
{ - #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime>; }, (_, false) => quote! { - #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig; }, @@ -126,7 +113,6 @@ fn expand_config_types( fn expand_config_build_storage_call( scrate: &TokenStream, - attr: &TokenStream, runtime: &Ident, decl: &Pallet, field_name: &Ident, @@ -139,7 +125,6 @@ fn expand_config_build_storage_call( }; quote! { - #attr #scrate::sp_runtime::BuildModuleGenesisStorage:: <#runtime, #instance>::build_module_genesis_storage(&self.#field_name, storage)?; } diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index b11fcef1bfd53..b242f9641562c 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_event( @@ -80,7 +79,7 @@ pub fn expand_outer_event( #scrate::RuntimeDebug, )] #[allow(non_camel_case_types)] - pub enum RuntimeEvent { + pub enum Event { #event_variants } @@ -98,35 +97,19 @@ fn expand_event_variant( let path = &pallet.path; let variant_name = &pallet.name; let part_is_generic = !generics.params.is_empty(); - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); match instance { - Some(inst) if part_is_generic => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event<#runtime, #path::#inst>), + Some(inst) if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime, #path::#inst>),) }, - Some(inst) => quote! 
{ - #attr - #[codec(index = #index)] - #variant_name(#path::Event<#path::#inst>), + Some(inst) => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#path::#inst>),) }, - None if part_is_generic => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event<#runtime>), + None if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime>),) }, - None => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Event), + None => { + quote!(#[codec(index = #index)] #variant_name(#path::Event),) }, } } @@ -137,24 +120,14 @@ fn expand_event_conversion( pallet_event: &TokenStream, ) -> TokenStream { let variant_name = &pallet.name; - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); quote! { - #attr - impl From<#pallet_event> for RuntimeEvent { + impl From<#pallet_event> for Event { fn from(x: #pallet_event) -> Self { - RuntimeEvent::#variant_name(x) + Event::#variant_name(x) } } - #attr - impl TryInto<#pallet_event> for RuntimeEvent { + impl TryInto<#pallet_event> for Event { type Error = (); fn try_into(self) -> #scrate::sp_std::result::Result<#pallet_event, Self::Error> { diff --git a/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 599b34ba87241..0f0d538643240 100644 --- a/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::{Ident, TypePath}; pub fn expand_outer_inherent( @@ -29,24 +28,14 @@ pub fn expand_outer_inherent( scrate: &TokenStream, ) -> TokenStream { let mut pallet_names = Vec::new(); - let 
mut pallet_attrs = Vec::new(); let mut query_inherent_part_macros = Vec::new(); for pallet_decl in pallet_decls { if pallet_decl.exists_part("Inherent") { let name = &pallet_decl.name; let path = &pallet_decl.path; - let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); pallet_names.push(name); - pallet_attrs.push(attr); query_inherent_part_macros.push(quote! { #path::__substrate_inherent_check::is_inherent_part_defined!(#name); }); @@ -71,7 +60,6 @@ pub fn expand_outer_inherent( let mut inherents = Vec::new(); #( - #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { let inherent = <#unchecked_extrinsic as #scrate::inherent::Extrinsic>::new( inherent.into(), @@ -102,25 +90,22 @@ pub fn expand_outer_inherent( let mut is_inherent = false; - #( - #pallet_attrs - { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(call) { - is_inherent = true; - if let Err(e) = #pallet_names::check_inherent(call, self) { - result.put_error( - #pallet_names::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result; - } + #({ + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(call) { + is_inherent = true; + if let Err(e) = #pallet_names::check_inherent(call, self) { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; } } } } - )* + })* // Inherents are before any other extrinsics. // No module marked it as inherent thus it is not. 
@@ -130,7 +115,6 @@ pub fn expand_outer_inherent( } #( - #pallet_attrs match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { @@ -193,17 +177,14 @@ pub fn expand_outer_inherent( false } else { let mut is_inherent = false; - #( - #pallet_attrs - { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(&call) { - is_inherent = true; - } + #({ + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(&call) { + is_inherent = true; } } - )* + })* is_inherent }; diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs index ec90a0d30f98b..6e2dd5fc002c6 100644 --- a/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::{Ident, TypePath}; pub fn expand_runtime_metadata( @@ -48,17 +47,8 @@ pub fn expand_runtime_metadata( let event = expand_pallet_metadata_events(&filtered_names, runtime, scrate, decl); let constants = expand_pallet_metadata_constants(runtime, decl); let errors = expand_pallet_metadata_errors(runtime, decl); - let attr = decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); quote! 
{ - #attr #scrate::metadata::PalletMetadata { name: stringify!(#name), index: #index, diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index 1551d85ea4c96..46f08832f0bb4 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_origin( @@ -103,13 +102,13 @@ pub fn expand_outer_origin( /// #[doc = #doc_string] #[derive(Clone)] - pub struct RuntimeOrigin { + pub struct Origin { caller: OriginCaller, - filter: #scrate::sp_std::rc::Rc::RuntimeCall) -> bool>>, + filter: #scrate::sp_std::rc::Rc::Call) -> bool>>, } #[cfg(not(feature = "std"))] - impl #scrate::sp_std::fmt::Debug for RuntimeOrigin { + impl #scrate::sp_std::fmt::Debug for Origin { fn fmt( &self, fmt: &mut #scrate::sp_std::fmt::Formatter, @@ -119,7 +118,7 @@ pub fn expand_outer_origin( } #[cfg(feature = "std")] - impl #scrate::sp_std::fmt::Debug for RuntimeOrigin { + impl #scrate::sp_std::fmt::Debug for Origin { fn fmt( &self, fmt: &mut #scrate::sp_std::fmt::Formatter, @@ -131,8 +130,8 @@ pub fn expand_outer_origin( } } - impl #scrate::traits::OriginTrait for RuntimeOrigin { - type Call = <#runtime as #system_path::Config>::RuntimeCall; + impl #scrate::traits::OriginTrait for Origin { + type Call = <#runtime as #system_path::Config>::Call; type PalletsOrigin = OriginCaller; type AccountId = <#runtime as #system_path::Config>::AccountId; @@ -147,7 +146,7 @@ pub fn expand_outer_origin( fn reset_filter(&mut self) { let filter = < <#runtime as #system_path::Config>::BaseCallFilter - as #scrate::traits::Contains<<#runtime as #system_path::Config>::RuntimeCall> + as #scrate::traits::Contains<<#runtime as #system_path::Config>::Call> 
>::contains; self.filter = #scrate::sp_std::rc::Rc::new(Box::new(filter)); @@ -169,10 +168,6 @@ pub fn expand_outer_origin( &self.caller } - fn into_caller(self) -> Self::PalletsOrigin { - self.caller - } - fn try_with_caller( mut self, f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -194,6 +189,13 @@ pub fn expand_outer_origin( fn signed(by: Self::AccountId) -> Self { #system_path::RawOrigin::Signed(by).into() } + + fn as_signed(self) -> Option { + match self.caller { + OriginCaller::system(#system_path::RawOrigin::Signed(by)) => Some(by), + _ => None, + } + } } #[derive( @@ -211,20 +213,21 @@ pub fn expand_outer_origin( // For backwards compatibility and ease of accessing these functions. #[allow(dead_code)] - impl RuntimeOrigin { + impl Origin { + #[doc = #doc_string_none_origin] pub fn none() -> Self { - ::none() + ::none() } #[doc = #doc_string_root_origin] pub fn root() -> Self { - ::root() + ::root() } #[doc = #doc_string_signed_origin] pub fn signed(by: <#runtime as #system_path::Config>::AccountId) -> Self { - ::signed(by) + ::signed(by) } } @@ -234,21 +237,6 @@ pub fn expand_outer_origin( } } - impl #scrate::traits::CallerTrait<<#runtime as #system_path::Config>::AccountId> for OriginCaller { - fn into_system(self) -> Option<#system_path::RawOrigin<<#runtime as #system_path::Config>::AccountId>> { - match self { - OriginCaller::system(x) => Some(x), - _ => None, - } - } - fn as_system_ref(&self) -> Option<&#system_path::RawOrigin<<#runtime as #system_path::Config>::AccountId>> { - match &self { - OriginCaller::system(o) => Some(o), - _ => None, - } - } - } - impl TryFrom for #system_path::Origin<#runtime> { type Error = OriginCaller; fn try_from(x: OriginCaller) @@ -262,7 +250,7 @@ pub fn expand_outer_origin( } } - impl From<#system_path::Origin<#runtime>> for RuntimeOrigin { + impl From<#system_path::Origin<#runtime>> for Origin { #[doc = #doc_string_runtime_origin] fn from(x: #system_path::Origin<#runtime>) -> Self { @@ -271,9 +259,9 @@ pub fn 
expand_outer_origin( } } - impl From for RuntimeOrigin { + impl From for Origin { fn from(x: OriginCaller) -> Self { - let mut o = RuntimeOrigin { + let mut o = Origin { caller: x, filter: #scrate::sp_std::rc::Rc::new(Box::new(|_| true)), }; @@ -284,9 +272,9 @@ pub fn expand_outer_origin( } } - impl From for #scrate::sp_std::result::Result<#system_path::Origin<#runtime>, RuntimeOrigin> { + impl From for #scrate::sp_std::result::Result<#system_path::Origin<#runtime>, Origin> { /// NOTE: converting to pallet origin loses the origin filter information. - fn from(val: RuntimeOrigin) -> Self { + fn from(val: Origin) -> Self { if let OriginCaller::system(l) = val.caller { Ok(l) } else { @@ -294,7 +282,7 @@ pub fn expand_outer_origin( } } } - impl From::AccountId>> for RuntimeOrigin { + impl From::AccountId>> for Origin { #[doc = #doc_string_runtime_origin_with_caller] fn from(x: Option<<#runtime as #system_path::Config>::AccountId>) -> Self { <#system_path::Origin<#runtime>>::from(x).into() @@ -315,35 +303,19 @@ fn expand_origin_caller_variant( let part_is_generic = !generics.params.is_empty(); let variant_name = &pallet.name; let path = &pallet.path; - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); match instance { - Some(inst) if part_is_generic => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Origin<#runtime, #path::#inst>), + Some(inst) if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime, #path::#inst>),) }, - Some(inst) => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Origin<#path::#inst>), + Some(inst) => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#path::#inst>),) }, - None if part_is_generic => quote! 
{ - #attr - #[codec(index = #index)] - #variant_name(#path::Origin<#runtime>), + None if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime>),) }, - None => quote! { - #attr - #[codec(index = #index)] - #variant_name(#path::Origin), + None => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin),) }, } } @@ -367,25 +339,15 @@ fn expand_origin_pallet_conversions( }; let doc_string = get_intra_doc_string(" Convert to runtime origin using", &path.module_name()); - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); quote! { - #attr impl From<#pallet_origin> for OriginCaller { fn from(x: #pallet_origin) -> Self { OriginCaller::#variant_name(x) } } - #attr - impl From<#pallet_origin> for RuntimeOrigin { + impl From<#pallet_origin> for Origin { #[doc = #doc_string] fn from(x: #pallet_origin) -> Self { let x: OriginCaller = x.into(); @@ -393,10 +355,9 @@ fn expand_origin_pallet_conversions( } } - #attr - impl From for #scrate::sp_std::result::Result<#pallet_origin, RuntimeOrigin> { + impl From for #scrate::sp_std::result::Result<#pallet_origin, Origin> { /// NOTE: converting to pallet origin loses the origin filter information. 
- fn from(val: RuntimeOrigin) -> Self { + fn from(val: Origin) -> Self { if let OriginCaller::#variant_name(l) = val.caller { Ok(l) } else { @@ -405,7 +366,6 @@ fn expand_origin_pallet_conversions( } } - #attr impl TryFrom for #pallet_origin { type Error = OriginCaller; fn try_from( diff --git a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs index 1d528779423a7..c030676802093 100644 --- a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs +++ b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_validate_unsigned( @@ -27,24 +26,14 @@ pub fn expand_outer_validate_unsigned( scrate: &TokenStream, ) -> TokenStream { let mut pallet_names = Vec::new(); - let mut pallet_attrs = Vec::new(); let mut query_validate_unsigned_part_macros = Vec::new(); for pallet_decl in pallet_decls { if pallet_decl.exists_part("ValidateUnsigned") { let name = &pallet_decl.name; let path = &pallet_decl.path; - let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); pallet_names.push(name); - pallet_attrs.push(attr); query_validate_unsigned_part_macros.push(quote! 
{ #path::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined!(#name); }); @@ -55,15 +44,12 @@ pub fn expand_outer_validate_unsigned( #( #query_validate_unsigned_part_macros )* impl #scrate::unsigned::ValidateUnsigned for #runtime { - type Call = RuntimeCall; + type Call = Call; fn pre_dispatch(call: &Self::Call) -> Result<(), #scrate::unsigned::TransactionValidityError> { #[allow(unreachable_patterns)] match call { - #( - #pallet_attrs - RuntimeCall::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), - )* + #( Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), )* // pre-dispatch should not stop inherent extrinsics, validation should prevent // including arbitrary (non-inherent) extrinsics to blocks. _ => Ok(()), @@ -77,10 +63,7 @@ pub fn expand_outer_validate_unsigned( ) -> #scrate::unsigned::TransactionValidity { #[allow(unreachable_patterns)] match call { - #( - #pallet_attrs - RuntimeCall::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), - )* + #( Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), )* _ => #scrate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), } } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 9e22037a6782e..7b4156a94db58 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -144,11 +144,9 @@ mod expand; mod parse; -use cfg_expr::Predicate; use frame_support_procedural_tools::{ generate_crate_access, generate_crate_access_2018, generate_hidden_includes, }; -use itertools::Itertools; use parse::{ ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration, WhereSection, @@ -156,7 +154,6 @@ use parse::{ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; -use std::{collections::HashSet, 
str::FromStr}; use syn::{Ident, Result}; /// The fixed name of the system pallet. @@ -226,28 +223,6 @@ fn construct_runtime_final_expansion( Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) })?; - if !system_pallet.cfg_pattern.is_empty() { - return Err(syn::Error::new( - system_pallet.name.span(), - "`System` pallet declaration is feature gated, please remove any `#[cfg]` attributes", - )) - } - - let features = pallets - .iter() - .filter_map(|decl| { - (!decl.cfg_pattern.is_empty()).then(|| { - decl.cfg_pattern.iter().flat_map(|attr| { - attr.predicates().filter_map(|pred| match pred { - Predicate::Feature(feat) => Some(feat), - Predicate::Test => Some("test"), - _ => None, - }) - }) - }) - }) - .flatten() - .collect::>(); let hidden_crate_name = "construct_runtime"; let scrate = generate_crate_access(hidden_crate_name, "frame-support"); @@ -256,7 +231,7 @@ fn construct_runtime_final_expansion( let outer_event = expand::expand_outer_event(&name, &pallets, &scrate)?; let outer_origin = expand::expand_outer_origin(&name, system_pallet, &pallets, &scrate)?; - let all_pallets = decl_all_pallets(&name, pallets.iter(), &features); + let all_pallets = decl_all_pallets(&name, pallets.iter()); let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate); let dispatch = expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate); @@ -318,140 +293,61 @@ fn construct_runtime_final_expansion( fn decl_all_pallets<'a>( runtime: &'a Ident, pallet_declarations: impl Iterator, - features: &HashSet<&str>, ) -> TokenStream2 { let mut types = TokenStream2::new(); - - // Every feature set to the pallet names that should be included by this feature set. 
- let mut features_to_names = features - .iter() - .map(|f| *f) - .powerset() - .map(|feat| (HashSet::from_iter(feat), Vec::new())) - .collect::, Vec<_>)>>(); - + let mut names = Vec::new(); for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; let mut generics = vec![quote!(#runtime)]; generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); - let mut attrs = Vec::new(); - for cfg in &pallet_declaration.cfg_pattern { - let feat = format!("#[cfg({})]\n", cfg.original()); - attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed")); - } let type_decl = quote!( - #(#attrs)* pub type #type_name = #pallet::Pallet <#(#generics),*>; ); types.extend(type_decl); - - if pallet_declaration.cfg_pattern.is_empty() { - for (_, names) in features_to_names.iter_mut() { - names.push(&pallet_declaration.name); - } - } else { - for (feature_set, names) in &mut features_to_names { - // Rust tidbit: if we have multiple `#[cfg]` feature on the same item, then the - // predicates listed in all `#[cfg]` attributes are effectively joined by `and()`, - // meaning that all of them must match in order to activate the item - let is_feature_active = pallet_declaration.cfg_pattern.iter().all(|expr| { - expr.eval(|pred| match pred { - Predicate::Feature(f) => feature_set.contains(f), - Predicate::Test => feature_set.contains(&"test"), - _ => false, - }) - }); - - if is_feature_active { - names.push(&pallet_declaration.name); - } - } - } + names.push(&pallet_declaration.name); } - // All possible features. This will be used below for the empty feature set. - let mut all_features = features_to_names + // Make nested tuple structure like: + // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` + // But ignore the system pallet. 
+ let all_pallets_without_system = names .iter() - .flat_map(|f| f.0.iter().cloned()) - .collect::>(); - let attribute_to_names = features_to_names - .into_iter() - .map(|(mut features, names)| { - // If this is the empty feature set, it needs to be changed to negate all available - // features. So, we ensure that there is some type declared when all features are not - // enabled. - if features.is_empty() { - let test_cfg = all_features.remove("test").then_some(quote!(test)).into_iter(); - let features = all_features.iter(); - let attr = quote!(#[cfg(all( #(not(#test_cfg)),* #(not(feature = #features)),* ))]); - - (attr, names) - } else { - let test_cfg = features.remove("test").then_some(quote!(test)).into_iter(); - let disabled_features = all_features.difference(&features); - let features = features.iter(); - let attr = quote!(#[cfg(all( #(#test_cfg,)* #(feature = #features,)* #(not(feature = #disabled_features)),* ))]); - - (attr, names) - } - }) - .collect::>(); + .filter(|n| **n != SYSTEM_PALLET_NAME) + .rev() + .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let all_pallets_without_system = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types. - /// Excludes the System pallet. - pub type AllPalletsWithoutSystem = ( #(#names,)* ); - } - }); - - let all_pallets_with_system = attribute_to_names.iter().map(|(attr, names)| { - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types. - pub type AllPalletsWithSystem = ( #(#names,)* ); - } - }); + // Make nested tuple structure like: + // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... 
))))` + let all_pallets_with_system = names + .iter() + .rev() + .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let all_pallets_without_system_reversed = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// Excludes the System pallet. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); - } - }); + // Make nested tuple structure like: + // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... ))))` + // But ignore the system pallet. + let all_pallets_without_system_reversed = names + .iter() + .filter(|n| **n != SYSTEM_PALLET_NAME) + .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let all_pallets_with_system_reversed = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithSystemReversed = ( #(#names,)* ); - } - }); + // Make nested tuple structure like: + // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... ))))` + let all_pallets_with_system_reversed = names + .iter() + .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let all_pallets_reversed_with_system_first = attribute_to_names.iter().map(|(attr, names)| { - let system = quote::format_ident!("{}", SYSTEM_PALLET_NAME); - let names = std::iter::once(&system) - .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME).cloned()); - quote! 
{ - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// With the system pallet first. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); - } - }); + let system_pallet = match names.iter().find(|n| **n == SYSTEM_PALLET_NAME) { + Some(name) => name, + None => + return syn::Error::new( + proc_macro2::Span::call_site(), + "`System` pallet declaration is missing. \ + Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", + ) + .into_compile_error(), + }; quote!( #types @@ -467,15 +363,26 @@ fn decl_all_pallets<'a>( https://github.com/paritytech/substrate/pull/10043")] pub type AllPallets = AllPalletsWithSystem; - #( #all_pallets_with_system )* + /// All pallets included in the runtime as a nested tuple of types. + pub type AllPalletsWithSystem = ( #all_pallets_with_system ); - #( #all_pallets_without_system )* + /// All pallets included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + pub type AllPalletsWithoutSystem = ( #all_pallets_without_system ); - #( #all_pallets_with_system_reversed )* + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// Excludes the System pallet. + pub type AllPalletsWithoutSystemReversed = ( #all_pallets_without_system_reversed ); - #( #all_pallets_without_system_reversed )* + /// All pallets included in the runtime as a nested tuple of types in reversed order. + pub type AllPalletsWithSystemReversed = ( #all_pallets_with_system_reversed ); - #( #all_pallets_reversed_with_system_first )* + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// With the system pallet first. 
+ pub type AllPalletsReversedWithSystemFirst = ( + #system_pallet, + AllPalletsWithoutSystemReversed + ); ) } @@ -498,19 +405,6 @@ fn decl_pallet_runtime_setup( } }) .collect::>(); - let pallet_attrs = pallet_declarations - .iter() - .map(|pallet| { - pallet.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { - let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }) - }) - .collect::>(); quote!( /// Provides an implementation of `PalletInfo` to provide information @@ -521,7 +415,6 @@ fn decl_pallet_runtime_setup( fn index() -> Option { let type_id = #scrate::sp_std::any::TypeId::of::

(); #( - #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#indices) } @@ -533,7 +426,6 @@ fn decl_pallet_runtime_setup( fn name() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::

(); #( - #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#name_strings) } @@ -545,7 +437,6 @@ fn decl_pallet_runtime_setup( fn module_name() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::

(); #( - #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#module_names) } @@ -557,7 +448,6 @@ fn decl_pallet_runtime_setup( fn crate_version() -> Option<#scrate::traits::CrateVersion> { let type_id = #scrate::sp_std::any::TypeId::of::

(); #( - #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some( <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 7a5acf43b92b0..711da85c10cfc 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -23,7 +23,7 @@ use syn::{ parse::{Parse, ParseStream}, punctuated::Punctuated, spanned::Spanned, - token, Attribute, Error, Ident, Path, Result, Token, + token, Error, Ident, Path, Result, Token, }; mod keyword { @@ -185,8 +185,6 @@ impl Parse for WhereDefinition { pub struct PalletDeclaration { /// The name of the pallet, e.g.`System` in `System: frame_system`. pub name: Ident, - /// Optional attributes tagged right above a pallet declaration. - pub attrs: Vec, /// Optional fixed index, e.g. `MyPallet ... = 3,`. pub index: Option, /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. @@ -214,8 +212,6 @@ pub enum SpecifiedParts { impl Parse for PalletDeclaration { fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let name = input.parse()?; let _: Token![:] = input.parse()?; let path = input.parse()?; @@ -283,7 +279,7 @@ impl Parse for PalletDeclaration { None }; - Ok(Self { attrs, name, path, instance, pallet_parts, specified_parts, index }) + Ok(Self { name, path, instance, pallet_parts, specified_parts, index }) } } @@ -539,8 +535,6 @@ pub struct Pallet { pub instance: Option, /// The pallet parts to use for the pallet. pub pallet_parts: Vec, - /// Expressions specified inside of a #[cfg] attribute. 
- pub cfg_pattern: Vec, } impl Pallet { @@ -653,32 +647,11 @@ fn convert_pallets(pallets: Vec) -> syn::Result (), } - let cfg_pattern = pallet - .attrs - .iter() - .map(|attr| { - if attr.path.segments.len() != 1 || attr.path.segments[0].ident != "cfg" { - let msg = "Unsupported attribute, only #[cfg] is supported on pallet \ - declarations in `construct_runtime`"; - return Err(syn::Error::new(attr.span(), msg)) - } - - attr.parse_args_with(|input: syn::parse::ParseStream| { - // Required, otherwise the parse stream doesn't advance and will result in - // an error. - let input = input.parse::()?; - cfg_expr::Expression::parse(&input.to_string()) - .map_err(|e| syn::Error::new(attr.span(), e.to_string())) - }) - }) - .collect::>>()?; - Ok(Pallet { name: pallet.name, index: final_index, path: pallet.path, instance: pallet.instance, - cfg_pattern, pallet_parts, }) }) diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index cd30946ae7855..00204b7a4d906 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -36,7 +36,6 @@ mod transactional; mod tt_macro; use proc_macro::TokenStream; -use quote::quote; use std::{cell::RefCell, str::FromStr}; pub(crate) use storage::INHERENT_INSTANCE_NAME; @@ -312,7 +311,7 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// System: frame_system::{Pallet, Call, Event, Config} = 0, /// Test: path::to::test::{Pallet, Call} = 1, /// -/// // Pallets with instances. +/// // Pallets with instances /// Test2_Instance1: test2::::{Pallet, Call, Storage, Event, Config, Origin}, /// Test2_DefaultInstance: test2::{Pallet, Call, Storage, Event, Config, Origin} = 4, /// @@ -403,77 +402,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. 
-/// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. -/// -/// ## Macro expansion: -/// -/// The macro adds this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: -/// * `GetStorageVersion` -/// * `OnGenesis`: contains some logic to write the pallet version into storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. -/// -/// It implements `PalletInfoAccess` on `Pallet` to ease access to pallet information given by -/// `frame_support::traits::PalletInfo`. (The implementation uses the associated type -/// `frame_system::Config::PalletInfo`). -/// -/// It implements `StorageInfoTrait` on `Pallet` which give information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. -/// -/// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// `StorageInfoTrait` for each storage in the implementation of `StorageInfoTrait` for the -/// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the -/// `PartialStorageInfoTrait` implementation of storages. -/// -/// ## Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The aim -/// of dev mode is to loosen some of the restrictions and requirements placed on production -/// pallets for easy tinkering and development. Dev mode pallets should not be used in -/// production. 
Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By default, dev -/// mode pallets will assume a weight of zero (`0`) if a weight is not specified. This is -/// equivalent to specifying `#[weight(0)]` on all calls that do not specify a weight. -/// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on -/// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type -/// definitions. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///

-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
-/// -/// See `frame_support::pallet` docs for more info. +/// Macro to define a pallet. Docs are at `frame_support::pallet`. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -654,638 +583,3 @@ pub fn storage_alias(_: TokenStream, input: TokenStream) -> TokenStream { .unwrap_or_else(|r| r.into_compile_error()) .into() } - -/// Used internally to decorate pallet attribute macro stubs when they are erroneously used -/// outside of a pallet module -fn pallet_macro_stub() -> TokenStream { - quote!(compile_error!( - "This attribute can only be used from within a pallet module marked with `#[frame_support::pallet]`" - )) - .into() -} - -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the pallet. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits -/// $optional_where_clause -/// { -/// ... -/// } -/// ``` -/// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item -/// in your `#[pallet::config]`. 
-#[proc_macro_attribute] -pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by `Get` -/// from [`pallet::config`](`macro@config`) into metadata, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; -/// } -/// ``` -#[proc_macro_attribute] -pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` -/// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. -#[proc_macro_attribute] -pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. -/// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. -/// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. 
-#[proc_macro_attribute] -pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// To generate the full storage info (used for PoV calculation) use the attribute -/// `#[pallet::generate_storage_info]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_storage_info] -/// pub struct Pallet(_); -/// ``` -/// -/// This requires all storage items to implement the trait `StorageInfoTrait`, thus all keys -/// and value types must be bound by `MaxEncodedLen`. Individual storages can opt-out from this -/// constraint by using [`#[pallet::unbounded]`](`macro@unbounded`) (see -/// [`#[pallet::storage]`](`macro@storage`) for more info). -#[proc_macro_attribute] -pub fn generate_storage_info(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// Because the `pallet::pallet` macro implements `GetStorageVersion`, the current storage -/// version needs to be communicated to the macro. This can be done by using the -/// `pallet::storage_version` attribute: -/// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` -/// -/// If not present, the current storage version is set to the default value. -#[proc_macro_attribute] -pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::hooks]` attribute allows you to specify a `Hooks` implementation for -/// `Pallet` that specifies pallet-specific logic. -/// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. 
-/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// ## Macro expansion -/// -/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, -/// `OffchainWorker`, and `IntegrityTest` using the provided `Hooks` implementation. -/// -/// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some -/// additional logic. E.g. logic to write the pallet version into storage. -/// -/// NOTE: The macro also adds some tracing logic when implementing the above traits. The -/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. -#[proc_macro_attribute] -pub fn hooks(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. -#[proc_macro_attribute] -pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`. -#[proc_macro_attribute] -pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. -/// -/// All call indexes start from 0, until it encounters a dispatchable function with a defined -/// call index. The dispatchable function that lexically follows the function with a defined -/// call index will have that call index, but incremented by 1, e.g. 
if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. -/// -/// All arguments must implement [`Debug`], [`PartialEq`], [`Eq`], `Decode`, `Encode`, and -/// [`Clone`]. For ease of use, bound by the trait `frame_support::pallet_prelude::Member`. -/// -/// If no `#[pallet::call]` exists, then a default implementation corresponding to the -/// following code is automatically generated: -/// -/// ```ignore -/// #[pallet::call] -/// impl Pallet {} -/// ``` -/// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the -/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls. -/// -/// ### Macro expansion -/// -/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: -/// [`Clone`], [`Eq`], [`PartialEq`], [`Debug`] (with stripped implementation in `not("std")`), -/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, and `UnfilteredDispatchable`. -/// -/// The macro implements the `Callable` trait on `Pallet` and a function `call_functions` -/// which returns the dispatchable metadata. -#[proc_macro_attribute] -pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// Allows you to define some extra constants to be added into constant metadata. 
-/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. -/// -/// ## Macro expansion -/// -/// The macro add some extra constants to pallet constant metadata. -#[proc_macro_attribute] -pub fn extra_constants(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. -/// -/// Any field type in the enum variants must implement `TypeInfo` in order to be properly used -/// in the metadata, and its encoded size should be as small as possible, preferably 1 byte in -/// size in order to reduce storage size. The error enum itself has an absolute maximum encoded -/// size specified by `MAX_MODULE_ERROR_ENCODED_SIZE`. -/// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement `PalletError`, otherwise the pallet will -/// fail to compile. 
Rust primitive types have already implemented the `PalletError` trait -/// along with some commonly used stdlib types such as [`Option`] and `PhantomData`, and hence -/// in most use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// ## Macro expansion -/// -/// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, and -/// `as_str` using variant doc. -/// -/// The macro also implements `From>` for `&'static str` and `From>` for -/// `DispatchError`. -#[proc_macro_attribute] -pub fn error(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::event]` attribute allows you to define pallet events. Pallet events are -/// stored under the `system` / `events` key when the block is applied (and then replaced when -/// the next block writes it's events). -/// -/// The Event enum must be defined as follows: -/// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` -/// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. -/// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], `Encode`, `Decode`, and -/// [`Debug`] (on std only). For ease of use, bound by the trait `Member`, available in -/// `frame_support::pallet_prelude`. -#[proc_macro_attribute] -pub fn event(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. 
-/// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. -/// -/// ## Macro expansion -/// -/// The macro will add on enum `Event` the attributes: -/// * `#[derive(frame_support::CloneNoBound)]` -/// * `#[derive(frame_support::EqNoBound)]` -/// * `#[derive(frame_support::PartialEqNoBound)]` -/// * `#[derive(frame_support::RuntimeDebugNoBound)]` -/// * `#[derive(codec::Encode)]` -/// * `#[derive(codec::Decode)]` -/// -/// The macro implements `From>` for (). -/// -/// The macro implements a metadata function on `Event` returning the `EventMetadata`. -/// -/// If `#[pallet::generate_deposit]` is present then the macro implements `fn deposit_event` on -/// `Pallet`. -#[proc_macro_attribute] -pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. -/// -/// Item should be defined as: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; -/// ``` -/// -/// or with unnamed generic: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<_, $some_generics, ...>; -/// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be -/// one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. The generic arguments of the -/// storage type can be given in two manners: named and unnamed. 
For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * `StorageValue` expects `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `StorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `CountedStorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `StorageDoubleMap` expects `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` and optionally -/// `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. -/// -/// For the `CountedStorageMap` variant, the `Prefix` also implements -/// `CountedStorageMapInstance`. It also associates a `CounterPrefix`, which is implemented the -/// same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = CountedStorageaMap<...>` will store -/// its counter at the prefix: `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. -#[proc_macro_attribute] -pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. 
-/// -/// Also see [`pallet::storage`](`macro@storage`) -#[proc_macro_attribute] -pub fn getter(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use. This is helpful if you wish to rename the storage field but don't -/// want to perform a migration. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` -#[proc_macro_attribute] -pub fn storage_prefix(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). -#[proc_macro_attribute] -pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The optional attribute `#[pallet::whitelist_storage]` will declare the -/// storage as whitelisted from benchmarking. Doing so will exclude reads of -/// that value's storage key from counting towards weight calculations during -/// benchmarking. -/// -/// This attribute should only be attached to storages that are known to be -/// read/used in every block. This will result in a more accurate benchmarking weight. 
-/// -/// ### Example -/// ```ignore -/// #[pallet::storage] -/// #[pallet::whitelist_storage] -/// pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; -/// ``` -/// -/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as -/// `#[pallet::whitelist_storage]` and can only be placed inside a `pallet` module in order for -/// it to work properly. -#[proc_macro_attribute] -pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait -/// to ease the use of storage types. This attribute is meant to be used alongside -/// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute -/// can be used multiple times. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` -/// -/// ## Macro expansion -/// -/// The macro renames the function to some internal name, generates a struct with the original -/// name of the function and its generic, and implements `Get<$ReturnType>` by calling the user -/// defined function. -#[proc_macro_attribute] -pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. -/// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait `GenesisBuild` with [`#[pallet::genesis_build]`](`macro@genesis_build`). The type -/// generics are constrained to be either none, or `T` or `T: Config`. 
-/// -/// E.g: -/// -/// ```ignore -/// #[pallet::genesis_config] -/// pub struct GenesisConfig { -/// _myfield: BalanceOf, -/// } -/// ``` -#[proc_macro_attribute] -pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's -/// initial state. -/// -/// The impl must be defined as: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig<$maybe_generics> { -/// fn build(&self) { $expr } -/// } -/// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on -/// type `GenesisConfig` with generics none or `T`. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// fn build(&self) {} -/// } -/// ``` -/// -/// ## Macro expansion -/// -/// The macro will add the following attribute: -/// * `#[cfg(feature = "std")]` -/// -/// The macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as a second -/// generic for non-instantiable pallets. -#[proc_macro_attribute] -pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. 
Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type -/// `Pallet`, and some optional where clause. -/// -/// ## Macro expansion -/// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. -#[proc_macro_attribute] -pub fn inherent(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type -/// `Pallet`, and some optional where clause. -/// -/// NOTE: There is also the `sp_runtime::traits::SignedExtension` trait that can be used to add -/// some specific logic for transaction validation. -/// -/// ## Macro expansion -/// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. -#[proc_macro_attribute] -pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. -/// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. 
-/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. -/// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. -#[proc_macro_attribute] -pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 6b166e6726d38..fe7589a8275d2 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -16,7 +16,6 @@ // limitations under the License. use crate::{pallet::Def, COUNTER}; -use quote::ToTokens; use syn::spanned::Spanned; /// @@ -32,7 +31,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { (span, where_clause, methods, docs) }, - None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()), + None => (def.item.span(), None, Vec::new(), Vec::new()), }; let frame_support = &def.frame_support; let frame_system = &def.frame_system; @@ -141,42 +140,6 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; - // Wrap all calls inside of storage layers - if let Some(syn::Item::Impl(item_impl)) = def - .call - .as_ref() - .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) - { - item_impl.items.iter_mut().for_each(|i| { - if let syn::ImplItem::Method(method) = i { - let block = &method.block; - method.block = syn::parse_quote! {{ - // We execute all dispatchable in a new storage layer, allowing them - // to return an error at any point, and undoing any storage changes. 
- #frame_support::storage::with_storage_layer(|| #block) - }}; - } - }); - } - - // Extracts #[allow] attributes, necessary so that we don't run into compiler warnings - let maybe_allow_attrs = methods - .iter() - .map(|method| { - method - .attrs - .iter() - .find(|attr| { - if let Ok(syn::Meta::List(syn::MetaList { path, .. })) = attr.parse_meta() { - path.segments.last().map(|seg| seg.ident == "allow").unwrap_or(false) - } else { - false - } - }) - .map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream()) - }) - .collect::>(); - quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -274,10 +237,6 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } - // Deprecated, but will warn when used - #[allow(deprecated)] - impl<#type_impl_gen> #frame_support::weights::GetDispatchInfo for #call_ident<#type_use_gen> #where_clause {} - impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen> #where_clause { @@ -297,10 +256,10 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { for #call_ident<#type_use_gen> #where_clause { - type RuntimeOrigin = #frame_system::pallet_prelude::OriginFor; + type Origin = #frame_system::pallet_prelude::OriginFor; fn dispatch_bypass_filter( self, - origin: Self::RuntimeOrigin + origin: Self::Origin ) -> #frame_support::dispatch::DispatchResultWithPostInfo { match self { #( @@ -308,9 +267,12 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); - #maybe_allow_attrs - <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) - .map(Into::into).map_err(Into::into) + // We execute all dispatchable in a new storage layer, allowing them + // to return an error at any point, and undoing any storage changes. 
+ #frame_support::storage::with_storage_layer(|| { + <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) + .map(Into::into).map_err(Into::into) + }) }, )* Self::__Ignore(_, _) => { @@ -324,7 +286,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #frame_support::dispatch::Callable for #pallet_ident<#type_use_gen> #where_clause { - type RuntimeCall = #call_ident<#type_use_gen>; + type Call = #call_ident<#type_use_gen>; } impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 5a8487b09de5c..124e8b312ce39 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -111,7 +111,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&error_item.attrs).is_empty() { error_item.attrs.push(syn::parse_quote!( #[doc = r" - Custom [dispatch errors](https://docs.substrate.io/main-docs/build/events-errors/) + Custom [dispatch errors](https://docs.substrate.io/v3/runtime/events-and-errors) of this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index abed680eb245e..acd60ab959c61 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -61,8 +61,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { let event_where_clause = &event.where_clause; // NOTE: actually event where clause must be a subset of config where clause because of - // `type RuntimeEvent: From>`. But we merge either way for potential better error - // message + // `type Event: From>`. 
But we merge either way for potential better error message let completed_where_clause = super::merge_where_clauses(&[&event.where_clause, &def.config.where_clause]); @@ -99,7 +98,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&event_item.attrs).is_empty() { event_item.attrs.push(syn::parse_quote!( #[doc = r" - The [event](https://docs.substrate.io/main-docs/build/events-errors/) emitted + The [event](https://docs.substrate.io/v3/runtime/events-and-errors) emitted by this pallet. "] )); @@ -137,13 +136,13 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> Pallet<#type_use_gen> #completed_where_clause { #fn_vis fn deposit_event(event: Event<#event_use_gen>) { let event = < - ::RuntimeEvent as + ::Event as From> >::from(event); let event = < - ::RuntimeEvent as - Into<::RuntimeEvent> + ::Event as + Into<::Event> >::into(event); <#frame_system::Pallet>::deposit_event(event) diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs index d19476779011b..53d0695e5f971 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -19,7 +19,7 @@ use crate::pallet::Def; /// /// * implement the trait `sp_runtime::BuildModuleGenesisStorage` -/// * add #[cfg(feature = "std")] to GenesisBuild implementation. +/// * add #[cfg(features = "std")] to GenesisBuild implementation. 
pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config = if let Some(genesis_config) = &def.genesis_config { genesis_config diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index d8d009cf3c940..7a1a94cf46d31 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -17,6 +17,7 @@ use crate::pallet::Def; +/// /// * implement the individual traits using the Hooks trait pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let (where_clause, span, has_runtime_upgrade) = match def.hooks.as_ref() { @@ -26,7 +27,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let has_runtime_upgrade = hooks.has_runtime_upgrade; (where_clause, span, has_runtime_upgrade) }, - None => (def.config.where_clause.clone(), def.pallet_struct.attr_span, false), + None => (None, def.pallet_struct.attr_span, false), }; let frame_support = &def.frame_support; @@ -58,19 +59,6 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } }; - let log_try_state = quote::quote! { - let pallet_name = < - ::PalletInfo - as - #frame_support::traits::PalletInfo - >::name::().expect("Every active pallet has a name in the runtime; qed"); - #frame_support::log::debug!( - target: #frame_support::LOG_TARGET, - "🩺 try-state pallet {:?}", - pallet_name, - ); - }; - let hooks_impl = if def.hooks.is_none() { let frame_system = &def.frame_system; quote::quote! 
{ @@ -160,7 +148,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<#frame_support::sp_std::vec::Vec, &'static str> { + fn pre_upgrade() -> Result<(), &'static str> { < Self as @@ -169,12 +157,12 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: #frame_support::sp_std::vec::Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { < Self as #frame_support::traits::Hooks<::BlockNumber> - >::post_upgrade(state) + >::post_upgrade() } } @@ -203,23 +191,5 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::integrity_test() } } - - #[cfg(feature = "try-runtime")] - impl<#type_impl_gen> - #frame_support::traits::TryState<::BlockNumber> - for #pallet_ident<#type_use_gen> #where_clause - { - fn try_state( - n: ::BlockNumber, - _s: #frame_support::traits::TryStateSelect - ) -> Result<(), &'static str> { - #log_try_state - < - Self as #frame_support::traits::Hooks< - ::BlockNumber - > - >::try_state(n) - } - } ) } diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index c33d2386700b2..83bef7a97af1f 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -74,7 +74,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { def.item.attrs.push(syn::parse_quote!( #[doc = r" The module that hosts all the - [FRAME](https://docs.substrate.io/main-docs/build/events-errors/) + [FRAME](https://docs.substrate.io/v3/runtime/frame) types needed to add this pallet to a runtime. 
"] diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index e5941a33fee13..52586a70a521a 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -62,7 +62,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&pallet_item.attrs).is_empty() { pallet_item.attrs.push(syn::parse_quote!( #[doc = r" - The [pallet](https://docs.substrate.io/reference/frame-pallets/#pallets) implementing + The [pallet](https://docs.substrate.io/v3/runtime/frame#pallets) implementing the on-chain logic. "] )); @@ -166,24 +166,6 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { quote::quote! { #frame_support::traits::StorageVersion::default() } }; - let whitelisted_storage_idents: Vec = def - .storages - .iter() - .filter_map(|s| s.whitelisted.then_some(s.ident.clone())) - .collect(); - - let whitelisted_storage_keys_impl = quote::quote![ - use #frame_support::traits::{StorageInfoTrait, TrackedStorageKey, WhitelistedStorageKeys}; - impl<#type_impl_gen> WhitelistedStorageKeys for #pallet_ident<#type_use_gen> #storages_where_clauses { - fn whitelisted_storage_keys() -> #frame_support::sp_std::vec::Vec { - use #frame_support::sp_std::vec; - vec![#( - TrackedStorageKey::new(#whitelisted_storage_idents::<#type_use_gen>::hashed_key().to_vec()) - ),*] - } - } - ]; - quote::quote_spanned!(def.pallet_struct.attr_span => #pallet_error_metadata @@ -258,7 +240,9 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #config_where_clause { fn count() -> usize { 1 } - fn infos() -> #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> { + fn accumulate( + acc: &mut #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> + ) { use #frame_support::traits::PalletInfoAccess; let item = 
#frame_support::traits::PalletInfoData { index: Self::index(), @@ -266,11 +250,10 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - #frame_support::sp_std::vec![item] + acc.push(item); } } #storage_info - #whitelisted_storage_keys_impl ) } diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 181f35b545496..657968e17a80c 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -19,9 +19,7 @@ use crate::pallet::{ parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, Def, }; -use quote::ToTokens; -use std::{collections::HashMap, ops::IndexMut}; -use syn::spanned::Spanned; +use std::collections::HashMap; /// Generate the prefix_ident related to the storage. /// prefix_ident is used for the prefix struct to be given to storage as first generic param. @@ -86,28 +84,12 @@ fn check_prefix_duplicates( Ok(()) } -pub struct ResultOnEmptyStructMetadata { - /// The Rust ident that is going to be used as the name of the OnEmpty struct. - pub name: syn::Ident, - /// The path to the error type being returned by the ResultQuery. - pub error_path: syn::Path, - /// The visibility of the OnEmpty struct. - pub visibility: syn::Visibility, - /// The type of the storage item. - pub value_ty: syn::Type, - /// The name of the pallet error enum variant that is going to be returned. - pub variant_name: syn::Ident, - /// The span used to report compilation errors about the OnEmpty struct. - pub span: proc_macro2::Span, -} - /// /// * if generics are unnamed: replace the first generic `_` by the generated prefix structure /// * if generics are named: reorder the generic, remove their name, and add the missing ones. 
/// * Add `#[allow(type_alias_bounds)]` -pub fn process_generics(def: &mut Def) -> syn::Result> { +pub fn process_generics(def: &mut Def) -> syn::Result<()> { let frame_support = &def.frame_support; - let mut on_empty_struct_metadata = Vec::new(); for storage_def in def.storages.iter_mut() { let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; @@ -138,72 +120,27 @@ pub fn process_generics(def: &mut Def) -> syn::Result syn::Type { - if let Some(QueryKind::ResultQuery(error_path, variant_name)) = - storage_def.query_kind.as_ref() - { - let on_empty_ident = - quote::format_ident!("__Frame_Internal_Get{}Result", storage_def.ident); - on_empty_struct_metadata.push(ResultOnEmptyStructMetadata { - name: on_empty_ident.clone(), - visibility: storage_def.vis.clone(), - value_ty, - error_path: error_path.clone(), - variant_name: variant_name.clone(), - span: storage_def.attr_span, - }); - return syn::parse_quote!(#on_empty_ident) - } - syn::parse_quote!(#frame_support::traits::GetDefault) - }; + let default_on_empty: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); let default_max_values: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); - let set_result_query_type_parameter = |query_type: &mut syn::Type| -> syn::Result<()> { - if let Some(QueryKind::ResultQuery(error_path, _)) = storage_def.query_kind.as_ref() { - if let syn::Type::Path(syn::TypePath { path: syn::Path { segments, .. }, .. }) = - query_type - { - if let Some(seg) = segments.last_mut() { - if let syn::PathArguments::AngleBracketed( - syn::AngleBracketedGenericArguments { args, .. 
}, - ) = &mut seg.arguments - { - args.clear(); - args.push(syn::GenericArgument::Type(syn::parse_quote!(#error_path))); - } - } - } else { - let msg = format!( - "Invalid pallet::storage, unexpected type for query, expected ResultQuery \ - with 1 type parameter, found `{}`", - query_type.to_token_stream().to_string() - ); - return Err(syn::Error::new(query_type.span(), msg)) - } - } - Ok(()) - }; - if let Some(named_generics) = storage_def.named_generics.clone() { args.args.clear(); args.args.push(syn::parse_quote!( #prefix_ident<#type_use_gen> )); match named_generics { StorageGenerics::Value { value, query_kind, on_empty } => { - args.args.push(syn::GenericArgument::Type(value.clone())); - let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); - set_result_query_type_parameter(&mut query_kind)?; + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); args.args.push(syn::GenericArgument::Type(on_empty)); }, StorageGenerics::Map { hasher, key, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value.clone())); - let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); - set_result_query_type_parameter(&mut query_kind)?; + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = 
max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -218,11 +155,10 @@ pub fn process_generics(def: &mut Def) -> syn::Result { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value.clone())); - let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); - set_result_query_type_parameter(&mut query_kind)?; + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -241,22 +177,20 @@ pub fn process_generics(def: &mut Def) -> syn::Result { args.args.push(syn::GenericArgument::Type(keygen)); - args.args.push(syn::GenericArgument::Type(value.clone())); - let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); - set_result_query_type_parameter(&mut query_kind)?; + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -264,40 +198,10 @@ pub fn process_generics(def: &mut Def) -> syn::Result ); - - let (value_idx, query_idx, on_empty_idx) = match storage_def.metadata { - 
Metadata::Value { .. } => (1, 2, 3), - Metadata::NMap { .. } => (2, 3, 4), - Metadata::Map { .. } | Metadata::CountedMap { .. } => (3, 4, 5), - Metadata::DoubleMap { .. } => (5, 6, 7), - }; - - if query_idx < args.args.len() { - if let syn::GenericArgument::Type(query_kind) = args.args.index_mut(query_idx) { - set_result_query_type_parameter(query_kind)?; - } - } else if let Some(QueryKind::ResultQuery(error_path, _)) = - storage_def.query_kind.as_ref() - { - args.args.push(syn::GenericArgument::Type(syn::parse_quote!(#error_path))) - } - - // Here, we only need to check if OnEmpty is *not* specified, and if so, then we have to - // generate a default OnEmpty struct for it. - if on_empty_idx >= args.args.len() && - matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) - { - let value_ty = match args.args[value_idx].clone() { - syn::GenericArgument::Type(ty) => ty, - _ => unreachable!(), - }; - let on_empty = default_on_empty(value_ty); - args.args.push(syn::GenericArgument::Type(on_empty)); - } } } - Ok(on_empty_struct_metadata) + Ok(()) } /// @@ -308,10 +212,9 @@ pub fn process_generics(def: &mut Def) -> syn::Result proc_macro2::TokenStream { - let on_empty_struct_metadata = match process_generics(def) { - Ok(idents) => idents, - Err(e) => return e.into_compile_error(), - }; + if let Err(e) = process_generics(def) { + return e.into_compile_error() + } // Check for duplicate prefixes let mut prefix_set = HashMap::new(); @@ -374,10 +277,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => - quote::quote_spanned!(storage.attr_span => - Result<#value, #error_path> - ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -397,10 +296,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => 
quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => - quote::quote_spanned!(storage.attr_span => - Result<#value, #error_path> - ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -422,10 +317,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => - quote::quote_spanned!(storage.attr_span => - Result<#value, #error_path> - ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -447,10 +338,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => - quote::quote_spanned!(storage.attr_span => - Result<#value, #error_path> - ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -474,10 +361,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), - QueryKind::ResultQuery(error_path, _) => - quote::quote_spanned!(storage.attr_span => - Result<#value, #error_path> - ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -576,61 +459,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { ) }); - let on_empty_structs = on_empty_struct_metadata.into_iter().map(|metadata| { - use crate::pallet::parse::GenericKind; - use syn::{GenericArgument, Path, PathArguments, PathSegment, Type, TypePath}; - - let ResultOnEmptyStructMetadata { - name, - visibility, - value_ty, - error_path, - variant_name, - span, - } = metadata; - - let generic_kind = match error_path.segments.last() { - Some(PathSegment { arguments: 
PathArguments::AngleBracketed(args), .. }) => { - let (has_config, has_instance) = - args.args.iter().fold((false, false), |(has_config, has_instance), arg| { - match arg { - GenericArgument::Type(Type::Path(TypePath { - path: Path { segments, .. }, - .. - })) => { - let maybe_config = - segments.first().map_or(false, |seg| seg.ident == "T"); - let maybe_instance = - segments.first().map_or(false, |seg| seg.ident == "I"); - - (has_config || maybe_config, has_instance || maybe_instance) - }, - _ => (has_config, has_instance), - } - }); - GenericKind::from_gens(has_config, has_instance).unwrap_or(GenericKind::None) - }, - _ => GenericKind::None, - }; - let type_impl_gen = generic_kind.type_impl_gen(proc_macro2::Span::call_site()); - let config_where_clause = &def.config.where_clause; - - quote::quote_spanned!(span => - #[doc(hidden)] - #[allow(non_camel_case_types)] - #visibility struct #name; - - impl<#type_impl_gen> #frame_support::traits::Get> - for #name - #config_where_clause - { - fn get() -> Result<#value_ty, #error_path> { - Err(<#error_path>::#variant_name) - } - } - ) - }); - let mut where_clauses = vec![&def.config.where_clause]; where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); let completed_where_clause = super::merge_where_clauses(&where_clauses); @@ -661,6 +489,5 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #( #getters )* #( #prefix_structs )* - #( #on_empty_structs )* ) } diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs index 3f85be81c1f7d..ff9f122867746 100644 --- a/frame/support/procedural/src/pallet/mod.rs +++ b/frame/support/procedural/src/pallet/mod.rs @@ -31,30 +31,20 @@ mod parse; pub use parse::Def; use syn::spanned::Spanned; -mod keyword { - syn::custom_keyword!(dev_mode); -} - pub fn pallet( attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { - let mut dev_mode = false; if !attr.is_empty() { - 
if let Ok(_) = syn::parse::(attr.clone()) { - dev_mode = true; - } else { - let msg = "Invalid pallet macro call: unexpected attribute. Macro call must be \ - bare, such as `#[frame_support::pallet]` or `#[pallet]`, or must specify the \ - `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or \ - #[pallet(dev_mode)]."; - let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into() - } + let msg = + "Invalid pallet macro call: expected no attributes, e.g. macro call must be just \ + `#[frame_support::pallet]` or `#[pallet]`"; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() } let item = syn::parse_macro_input!(item as syn::ItemMod); - match parse::Def::try_from(item, dev_mode) { + match parse::Def::try_from(item) { Ok(def) => expand::expand(def).into(), Err(e) => e.to_compile_error().into(), } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index fbca9a52c767c..d8a81d699b8c2 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -48,8 +48,8 @@ pub struct CallDef { pub docs: Vec, } -/// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` #[derive(Clone)] +/// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` pub struct CallVariantDef { /// Function name. pub name: syn::Ident, @@ -61,8 +61,6 @@ pub struct CallVariantDef { pub call_index: u8, /// Docs, used for metadata. pub docs: Vec, - /// Attributes annotated at the top of the dispatchable function. - pub attrs: Vec, } /// Attributes for functions in call impl block. 
@@ -144,20 +142,19 @@ impl CallDef { attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, - dev_mode: bool, ) -> syn::Result { - let item_impl = if let syn::Item::Impl(item) = item { + let item = if let syn::Item::Impl(item) = item { item } else { return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let instances = vec![ - helper::check_impl_gen(&item_impl.generics, item_impl.impl_token.span())?, - helper::check_pallet_struct_usage(&item_impl.self_ty)?, + helper::check_impl_gen(&item.generics, item.impl_token.span())?, + helper::check_pallet_struct_usage(&item.self_ty)?, ]; - if let Some((_, _, for_)) = item_impl.trait_ { + if let Some((_, _, for_)) = item.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. }`"; return Err(syn::Error::new(for_.span(), msg)) @@ -166,8 +163,8 @@ impl CallDef { let mut methods = vec![]; let mut indices = HashMap::new(); let mut last_index: Option = None; - for item in &mut item_impl.items { - if let syn::ImplItem::Method(method) = item { + for impl_item in &mut item.items { + if let syn::ImplItem::Method(method) = impl_item { if !matches!(method.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::call, dispatchable function must be public: \ `pub fn`"; @@ -214,14 +211,6 @@ impl CallDef { }, ); - if weight_attrs.is_empty() && dev_mode { - // inject a default O(1) weight when dev mode is enabled and no weight has - // been specified on the call - let empty_weight: syn::Expr = syn::parse(quote::quote!(0).into()) - .expect("we are parsing a quoted string; qed"); - weight_attrs.push(FunctionAttr::Weight(empty_weight)); - } - if weight_attrs.len() != 1 { let msg = if weight_attrs.is_empty() { "Invalid pallet::call, requires weight attribute i.e. 
`#[pallet::weight($expr)]`" @@ -298,11 +287,10 @@ impl CallDef { call_index: final_index, args, docs, - attrs: method.attrs.clone(), }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(item.span(), msg)) + return Err(syn::Error::new(impl_item.span(), msg)) } } @@ -311,8 +299,8 @@ impl CallDef { attr_span, instances, methods, - where_clause: item_impl.generics.where_clause.clone(), - docs: get_doc_literals(&item_impl.attrs), + where_clause: item.generics.where_clause.clone(), + docs: get_doc_literals(&item.attrs), }) } } diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 0f3aa69b170ce..60888fc5dd357 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -28,7 +28,6 @@ mod keyword { syn::custom_keyword!(I); syn::custom_keyword!(config); syn::custom_keyword!(IsType); - syn::custom_keyword!(RuntimeEvent); syn::custom_keyword!(Event); syn::custom_keyword!(constant); syn::custom_keyword!(frame_system); @@ -43,9 +42,8 @@ pub struct ConfigDef { pub has_instance: bool, /// Const associated type. pub consts_metadata: Vec, - /// Whether the trait has the associated type `Event`, note that those bounds are - /// checked: - /// * `IsType::RuntimeEvent` + /// Whether the trait has the associated type `Event`, note that those bounds are checked: + /// * `IsType::Event` /// * `From` or `From>` or `From>` pub has_event_type: bool, /// The where clause on trait definition but modified so `Self` is `T`. 
@@ -161,7 +159,7 @@ impl syn::parse::Parse for ConfigBoundParse { } } -/// Parse for `IsType<::RuntimeEvent>` and retrieve `$ident` +/// Parse for `IsType<::Event>` and retrieve `$ident` pub struct IsTypeBoundEventParse(syn::Ident); impl syn::parse::Parse for IsTypeBoundEventParse { @@ -176,7 +174,7 @@ impl syn::parse::Parse for IsTypeBoundEventParse { input.parse::()?; input.parse::]>()?; input.parse::()?; - input.parse::()?; + input.parse::()?; input.parse::]>()?; Ok(Self(ident)) @@ -214,7 +212,7 @@ impl syn::parse::Parse for FromEventParse { } } -/// Check if trait_item is `type RuntimeEvent`, if so checks its bounds are those expected. +/// Check if trait_item is `type Event`, if so checks its bounds are those expected. /// (Event type is reserved type) fn check_event_type( frame_system: &syn::Ident, @@ -222,10 +220,10 @@ fn check_event_type( trait_has_instance: bool, ) -> syn::Result { if let syn::TraitItem::Type(type_) = trait_item { - if type_.ident == "RuntimeEvent" { + if type_.ident == "Event" { // Check event has no generics if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ + let msg = "Invalid `type Event`, associated type `Event` is reserved and must have\ no generics nor where_clause"; return Err(syn::Error::new(trait_item.span(), msg)) } @@ -238,8 +236,8 @@ fn check_event_type( if !has_is_type_bound { let msg = format!( - "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ - bound: `IsType<::RuntimeEvent>`", + "Invalid `type Event`, associated type `Event` is reserved and must \ + bound: `IsType<::Event>`", frame_system, ); return Err(syn::Error::new(type_.span(), msg)) @@ -253,14 +251,14 @@ fn check_event_type( let from_event_bound = if let Some(b) = from_event_bound { b } else { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + let msg = 
"Invalid `type Event`, associated type `Event` is reserved and must \ bound: `From` or `From>` or `From>`"; return Err(syn::Error::new(type_.span(), msg)) }; if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ + let msg = "Invalid `type Event`, associated type `Event` bounds inconsistent \ `From`. Config and generic Event must be both with instance or \ without instance"; return Err(syn::Error::new(type_.span(), msg)) diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index f91159248281c..a436f7e09c1d7 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -59,11 +59,10 @@ pub struct Def { pub type_values: Vec, pub frame_system: syn::Ident, pub frame_support: syn::Ident, - pub dev_mode: bool, } impl Def { - pub fn try_from(mut item: syn::ItemMod, dev_mode: bool) -> syn::Result { + pub fn try_from(mut item: syn::ItemMod) -> syn::Result { let frame_system = generate_crate_access_2018("frame-system")?; let frame_support = generate_crate_access_2018("frame-support")?; @@ -106,11 +105,11 @@ impl Def { let m = hooks::HooksDef::try_from(span, index, item)?; hooks = Some(m); }, - Some(PalletAttr::RuntimeCall(span)) if call.is_none() => - call = Some(call::CallDef::try_from(span, index, item, dev_mode)?), + Some(PalletAttr::Call(span)) if call.is_none() => + call = Some(call::CallDef::try_from(span, index, item)?), Some(PalletAttr::Error(span)) if error.is_none() => error = Some(error::ErrorDef::try_from(span, index, item)?), - Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => + Some(PalletAttr::Event(span)) if event.is_none() => event = Some(event::EventDef::try_from(span, index, item)?), Some(PalletAttr::GenesisConfig(_)) if genesis_config.is_none() => { let g = 
genesis_config::GenesisConfigDef::try_from(index, item)?; @@ -120,12 +119,12 @@ impl Def { let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; genesis_build = Some(g); }, - Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => + Some(PalletAttr::Origin(_)) if origin.is_none() => origin = Some(origin::OriginDef::try_from(index, item)?), Some(PalletAttr::Inherent(_)) if inherent.is_none() => inherent = Some(inherent::InherentDef::try_from(index, item)?), Some(PalletAttr::Storage(span)) => - storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), + storages.push(storage::StorageDef::try_from(span, index, item)?), Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; validate_unsigned = Some(v); @@ -174,7 +173,6 @@ impl Def { type_values, frame_system, frame_support, - dev_mode, }; def.check_instance_usage()?; @@ -184,19 +182,19 @@ impl Def { } /// Check that usage of trait `Event` is consistent with the definition, i.e. it is declared - /// and trait defines type RuntimeEvent, or not declared and no trait associated type. + /// and trait defines type Event, or not declared and no trait associated type. fn check_event_usage(&self) -> syn::Result<()> { match (self.config.has_event_type, self.event.is_some()) { (true, false) => { - let msg = "Invalid usage of RuntimeEvent, `Config` contains associated type `RuntimeEvent`, \ + let msg = "Invalid usage of Event, `Config` contains associated type `Event`, \ but enum `Event` is not declared (i.e. no use of `#[pallet::event]`). 
\ - Note that type `RuntimeEvent` in trait is reserved to work alongside pallet event."; + Note that type `Event` in trait is reserved to work alongside pallet event."; Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) }, (false, true) => { - let msg = "Invalid usage of RuntimeEvent, `Config` contains no associated type \ - `RuntimeEvent`, but enum `Event` is declared (in use of `#[pallet::event]`). \ - An RuntimeEvent associated type must be declare on trait `Config`."; + let msg = "Invalid usage of Event, `Config` contains no associated type \ + `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). \ + An Event associated type must be declare on trait `Config`."; Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) }, _ => Ok(()), @@ -393,10 +391,10 @@ enum PalletAttr { Config(proc_macro2::Span), Pallet(proc_macro2::Span), Hooks(proc_macro2::Span), - RuntimeCall(proc_macro2::Span), + Call(proc_macro2::Span), Error(proc_macro2::Span), - RuntimeEvent(proc_macro2::Span), - RuntimeOrigin(proc_macro2::Span), + Event(proc_macro2::Span), + Origin(proc_macro2::Span), Inherent(proc_macro2::Span), Storage(proc_macro2::Span), GenesisConfig(proc_macro2::Span), @@ -412,10 +410,10 @@ impl PalletAttr { Self::Config(span) => *span, Self::Pallet(span) => *span, Self::Hooks(span) => *span, - Self::RuntimeCall(span) => *span, + Self::Call(span) => *span, Self::Error(span) => *span, - Self::RuntimeEvent(span) => *span, - Self::RuntimeOrigin(span) => *span, + Self::Event(span) => *span, + Self::Origin(span) => *span, Self::Inherent(span) => *span, Self::Storage(span) => *span, Self::GenesisConfig(span) => *span, @@ -443,13 +441,13 @@ impl syn::parse::Parse for PalletAttr { } else if lookahead.peek(keyword::hooks) { Ok(PalletAttr::Hooks(content.parse::()?.span())) } else if lookahead.peek(keyword::call) { - Ok(PalletAttr::RuntimeCall(content.parse::()?.span())) + Ok(PalletAttr::Call(content.parse::()?.span())) } else if lookahead.peek(keyword::error) { 
Ok(PalletAttr::Error(content.parse::()?.span())) } else if lookahead.peek(keyword::event) { - Ok(PalletAttr::RuntimeEvent(content.parse::()?.span())) + Ok(PalletAttr::Event(content.parse::()?.span())) } else if lookahead.peek(keyword::origin) { - Ok(PalletAttr::RuntimeOrigin(content.parse::()?.span())) + Ok(PalletAttr::Origin(content.parse::()?.span())) } else if lookahead.peek(keyword::inherent) { Ok(PalletAttr::Inherent(content.parse::()?.span())) } else if lookahead.peek(keyword::storage) { diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 8b551ab31d6c3..1f1bb5b2f26ad 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -28,9 +28,7 @@ mod keyword { syn::custom_keyword!(getter); syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); - syn::custom_keyword!(whitelist_storage); syn::custom_keyword!(OptionQuery); - syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); } @@ -38,21 +36,16 @@ mod keyword { /// * `#[pallet::getter(fn dummy)]` /// * `#[pallet::storage_prefix = "CustomName"]` /// * `#[pallet::unbounded]` -/// * `#[pallet::whitelist_storage] pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), Unbounded(proc_macro2::Span), - WhitelistStorage(proc_macro2::Span), } impl PalletStorageAttr { fn attr_span(&self) -> proc_macro2::Span { match self { - Self::Getter(_, span) | - Self::StorageName(_, span) | - Self::Unbounded(span) | - Self::WhitelistStorage(span) => *span, + Self::Getter(_, span) | Self::StorageName(_, span) | Self::Unbounded(span) => *span, } } } @@ -90,9 +83,6 @@ impl syn::parse::Parse for PalletStorageAttr { content.parse::()?; Ok(Self::Unbounded(attr_span)) - } else if lookahead.peek(keyword::whitelist_storage) { - content.parse::()?; - Ok(Self::WhitelistStorage(attr_span)) } else { Err(lookahead.error()) } 
@@ -103,7 +93,6 @@ struct PalletStorageAttrInfo { getter: Option, rename_as: Option, unbounded: bool, - whitelisted: bool, } impl PalletStorageAttrInfo { @@ -111,14 +100,12 @@ impl PalletStorageAttrInfo { let mut getter = None; let mut rename_as = None; let mut unbounded = false; - let mut whitelisted = false; for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, - PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, attr => return Err(syn::Error::new( attr.attr_span(), @@ -127,7 +114,7 @@ impl PalletStorageAttrInfo { } } - Ok(PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted }) + Ok(PalletStorageAttrInfo { getter, rename_as, unbounded }) } } @@ -142,7 +129,6 @@ pub enum Metadata { pub enum QueryKind { OptionQuery, - ResultQuery(syn::Path, syn::Ident), ValueQuery, } @@ -167,7 +153,7 @@ pub struct StorageDef { /// Optional expression that evaluates to a type that can be used as StoragePrefix instead of /// ident. pub rename_as: Option, - /// Whereas the querytype of the storage is OptionQuery, ResultQuery or ValueQuery. + /// Whereas the querytype of the storage is OptionQuery or ValueQuery. /// Note that this is best effort as it can't be determined when QueryKind is generic, and /// result can be false if user do some unexpected type alias. pub query_kind: Option, @@ -183,8 +169,6 @@ pub struct StorageDef { pub named_generics: Option, /// If the value stored in this storage is unbounded. 
pub unbounded: bool, - /// Whether or not reads to this storage key will be ignored by benchmarking - pub whitelisted: bool, } /// The parsed generic from the @@ -555,8 +539,8 @@ fn process_generics( found => { let msg = format!( "Invalid pallet::storage, expected ident: `StorageValue` or \ - `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` \ - in order to expand metadata, found `{}`.", + `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, \ + found `{}`.", found, ); return Err(syn::Error::new(segment.ident.span(), msg)) @@ -678,7 +662,6 @@ impl StorageDef { attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, - dev_mode: bool, ) -> syn::Result { let item = if let syn::Item::Type(item) = item { item @@ -687,11 +670,9 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted } = + let PalletStorageAttrInfo { getter, rename_as, unbounded } = PalletStorageAttrInfo::from_attrs(attrs)?; - // set all storages to be unbounded if dev_mode is enabled - unbounded |= dev_mode; let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); let instances = vec![helper::check_type_def_gen(&item.generics, item.ident.span())?]; @@ -714,105 +695,21 @@ impl StorageDef { let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; let query_kind = query_kind - .map(|query_kind| { - use syn::{ - AngleBracketedGenericArguments, GenericArgument, Path, PathArguments, Type, - TypePath, - }; - - let result_query = match query_kind { - Type::Path(path) - if path - .path - .segments - .last() - .map_or(false, |s| s.ident == "OptionQuery") => - return Ok(Some(QueryKind::OptionQuery)), - Type::Path(TypePath { path: Path { segments, .. }, .. 
}) - if segments.last().map_or(false, |s| s.ident == "ResultQuery") => - segments - .last() - .expect("segments is checked to have the last value; qed") - .clone(), - Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => - return Ok(Some(QueryKind::ValueQuery)), - _ => return Ok(None), - }; - - let error_type = match result_query.arguments { - PathArguments::AngleBracketed(AngleBracketedGenericArguments { - args, .. - }) => { - if args.len() != 1 { - let msg = format!( - "Invalid pallet::storage, unexpected number of generic arguments \ - for ResultQuery, expected 1 type argument, found {}", - args.len(), - ); - return Err(syn::Error::new(args.span(), msg)) - } - - args[0].clone() - }, - args => { - let msg = format!( - "Invalid pallet::storage, unexpected generic args for ResultQuery, \ - expected angle-bracketed arguments, found `{}`", - args.to_token_stream().to_string() - ); - return Err(syn::Error::new(args.span(), msg)) - }, - }; - - match error_type { - GenericArgument::Type(Type::Path(TypePath { - path: Path { segments: err_variant, leading_colon }, - .. 
- })) => { - if err_variant.len() < 2 { - let msg = format!( - "Invalid pallet::storage, unexpected number of path segments for \ - the generics in ResultQuery, expected a path with at least 2 \ - segments, found {}", - err_variant.len(), - ); - return Err(syn::Error::new(err_variant.span(), msg)) - } - let mut error = err_variant.clone(); - let err_variant = error - .pop() - .expect("Checked to have at least 2; qed") - .into_value() - .ident; - - // Necessary here to eliminate the last double colon - let last = - error.pop().expect("Checked to have at least 2; qed").into_value(); - error.push_value(last); - - Ok(Some(QueryKind::ResultQuery( - syn::Path { leading_colon, segments: error }, - err_variant, - ))) - }, - gen_arg => { - let msg = format!( - "Invalid pallet::storage, unexpected generic argument kind, expected a \ - type path to a `PalletError` enum variant, found `{}`", - gen_arg.to_token_stream().to_string(), - ); - Err(syn::Error::new(gen_arg.span(), msg)) - }, - } + .map(|query_kind| match query_kind { + syn::Type::Path(path) + if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") => + Some(QueryKind::OptionQuery), + syn::Type::Path(path) + if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => + Some(QueryKind::ValueQuery), + _ => None, }) - .transpose()? - .unwrap_or(Some(QueryKind::OptionQuery)); + .unwrap_or(Some(QueryKind::OptionQuery)); // This value must match the default generic. if let (None, Some(getter)) = (query_kind.as_ref(), getter.as_ref()) { let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ - identifiable. QueryKind must be `OptionQuery`, `ResultQuery`, `ValueQuery`, or default \ - one to be identifiable."; + identifiable. 
QueryKind must be `OptionQuery`, `ValueQuery`, or default one to be \ + identifiable."; return Err(syn::Error::new(getter.span(), msg)) } @@ -831,7 +728,6 @@ impl StorageDef { cfg_attrs, named_generics, unbounded, - whitelisted, }) } } diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index e8e2d7529cb3f..756653f7ba85d 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -32,7 +32,6 @@ pub(crate) use instance_trait::INHERENT_INSTANCE_NAME; use frame_support_procedural_tools::{ generate_crate_access, generate_hidden_includes, syn_ext as ext, }; - use quote::quote; /// All information contained in input of decl_storage diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index d497a672e2970..ae4230efc63f8 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -31,22 +31,18 @@ pub use crate::{ traits::{ CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, UnfilteredDispatchable, }, + weights::{ + ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, + TransactionPriority, WeighData, Weight, WithPostDispatchInfo, + }, }; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use sp_runtime::{ - generic::{CheckedExtrinsic, UncheckedExtrinsic}, - traits::SignedExtension, -}; -pub use sp_runtime::{ - traits::Dispatchable, transaction_validity::TransactionPriority, DispatchError, RuntimeDebug, -}; -pub use sp_weights::Weight; +pub use sp_runtime::{traits::Dispatchable, DispatchError, RuntimeDebug}; /// The return type of a `Dispatchable` in frame. When returned explicitly from /// a dispatchable function it allows overriding the default `PostDispatchInfo` /// returned from a dispatch. 
-pub type DispatchResultWithPostInfo = sp_runtime::DispatchResultWithInfo; +pub type DispatchResultWithPostInfo = + sp_runtime::DispatchResultWithInfo; /// Unaugmented version of `DispatchResultWithPostInfo` that can be returned from /// dispatchable functions and is automatically converted to the augmented type. Should be @@ -55,16 +51,17 @@ pub type DispatchResultWithPostInfo = sp_runtime::DispatchResultWithInfo; /// The error type contained in a `DispatchResultWithPostInfo`. -pub type DispatchErrorWithPostInfo = sp_runtime::DispatchErrorWithPostInfo; +pub type DispatchErrorWithPostInfo = + sp_runtime::DispatchErrorWithPostInfo; /// Serializable version of pallet dispatchable. pub trait Callable { - type RuntimeCall: UnfilteredDispatchable + Codec + Clone + PartialEq + Eq; + type Call: UnfilteredDispatchable + Codec + Clone + PartialEq + Eq; } // dirty hack to work around serde_derive issue // https://github.com/rust-lang/rust/issues/51331 -pub type CallableCallFor =
>::RuntimeCall; +pub type CallableCallFor = >::Call; /// Origin for the System pallet. #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -94,552 +91,6 @@ impl From> for RawOrigin { pub trait Parameter: Codec + EncodeLike + Clone + Eq + fmt::Debug + scale_info::TypeInfo {} impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + scale_info::TypeInfo {} -/// Means of classifying a dispatchable function. -pub trait ClassifyDispatch { - /// Classify the dispatch function based on input data `target` of type `T`. When implementing - /// this for a dispatchable, `T` will be a tuple of all arguments given to the function (except - /// origin). - fn classify_dispatch(&self, target: T) -> DispatchClass; -} - -/// Indicates if dispatch function should pay fees or not. -/// -/// If set to `Pays::No`, the block resource limits are applied, yet no fee is deducted. -pub trait PaysFee { - fn pays_fee(&self, _target: T) -> Pays; -} - -/// Explicit enum to denote if a transaction pays fee or not. -#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, TypeInfo)] -pub enum Pays { - /// Transactor will pay related fees. - Yes, - /// Transactor will NOT pay related fees. - No, -} - -impl Default for Pays { - fn default() -> Self { - Self::Yes - } -} - -impl From for PostDispatchInfo { - fn from(pays_fee: Pays) -> Self { - Self { actual_weight: None, pays_fee } - } -} - -/// A generalized group of dispatch types. -/// -/// NOTE whenever upgrading the enum make sure to also update -/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum DispatchClass { - /// A normal dispatch. - Normal, - /// An operational dispatch. - Operational, - /// A mandatory dispatch. 
These kinds of dispatch are always included regardless of their - /// weight, therefore it is critical that they are separately validated to ensure that a - /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just - /// means ensuring that the extrinsic can only be included once and that it is always very - /// light. - /// - /// Do *NOT* use it for extrinsics that can be heavy. - /// - /// The only real use case for this is inherent extrinsics that are required to execute in a - /// block for the block to be valid, and it solves the issue in the case that the block - /// initialization is sufficiently heavy to mean that those inherents do not fit into the - /// block. Essentially, we assume that in these exceptional circumstances, it is better to - /// allow an overweight block to be created than to not allow any block at all to be created. - Mandatory, -} - -impl Default for DispatchClass { - fn default() -> Self { - Self::Normal - } -} - -impl DispatchClass { - /// Returns an array containing all dispatch classes. - pub fn all() -> &'static [DispatchClass] { - &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] - } - - /// Returns an array of all dispatch classes except `Mandatory`. - pub fn non_mandatory() -> &'static [DispatchClass] { - &[DispatchClass::Normal, DispatchClass::Operational] - } -} - -/// A trait that represents one or many values of given type. -/// -/// Useful to accept as parameter type to let the caller pass either a single value directly -/// or an iterator. -pub trait OneOrMany { - /// The iterator type. - type Iter: Iterator; - /// Convert this item into an iterator. 
- fn into_iter(self) -> Self::Iter; -} - -impl OneOrMany for DispatchClass { - type Iter = sp_std::iter::Once; - fn into_iter(self) -> Self::Iter { - sp_std::iter::once(self) - } -} - -impl<'a> OneOrMany for &'a [DispatchClass] { - type Iter = sp_std::iter::Cloned>; - fn into_iter(self) -> Self::Iter { - self.iter().cloned() - } -} - -/// A bundle of static information collected from the `#[pallet::weight]` attributes. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct DispatchInfo { - /// Weight of this transaction. - pub weight: Weight, - /// Class of this transaction. - pub class: DispatchClass, - /// Does this transaction pay fees. - pub pays_fee: Pays, -} - -/// A `Dispatchable` function (aka transaction) that can carry some static information along with -/// it, using the `#[pallet::weight]` attribute. -pub trait GetDispatchInfo { - /// Return a `DispatchInfo`, containing relevant information of this dispatch. - /// - /// This is done independently of its encoded size. - fn get_dispatch_info(&self) -> DispatchInfo; -} - -impl GetDispatchInfo for () { - fn get_dispatch_info(&self) -> DispatchInfo { - DispatchInfo::default() - } -} - -/// Extract the actual weight from a dispatch result if any or fall back to the default weight. -pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Weight { - match result { - Ok(post_info) => post_info, - Err(err) => &err.post_info, - } - .calc_actual_weight(info) -} - -/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. -pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { - match result { - Ok(post_info) => post_info, - Err(err) => &err.post_info, - } - .pays_fee(info) -} - -/// Weight information that is only available post dispatch. -/// NOTE: This can only be used to reduce the weight or fee, not increase it. 
-#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct PostDispatchInfo { - /// Actual weight consumed by a call or `None` which stands for the worst case static weight. - pub actual_weight: Option, - /// Whether this transaction should pay fees when all is said and done. - pub pays_fee: Pays, -} - -impl PostDispatchInfo { - /// Calculate how much (if any) weight was not used by the `Dispatchable`. - pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { - info.weight - self.calc_actual_weight(info) - } - - /// Calculate how much weight was actually spent by the `Dispatchable`. - pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight { - if let Some(actual_weight) = self.actual_weight { - actual_weight.min(info.weight) - } else { - info.weight - } - } - - /// Determine if user should actually pay fees at the end of the dispatch. - pub fn pays_fee(&self, info: &DispatchInfo) -> Pays { - // If they originally were not paying fees, or the post dispatch info - // says they should not pay fees, then they don't pay fees. - // This is because the pre dispatch information must contain the - // worst case for weight and fees paid. - if info.pays_fee == Pays::No || self.pays_fee == Pays::No { - Pays::No - } else { - // Otherwise they pay. - Pays::Yes - } - } -} - -impl From<()> for PostDispatchInfo { - fn from(_: ()) -> Self { - Self { actual_weight: None, pays_fee: Default::default() } - } -} - -impl sp_runtime::traits::Printable for PostDispatchInfo { - fn print(&self) { - "actual_weight=".print(); - match self.actual_weight { - Some(weight) => weight.print(), - None => "max-weight".print(), - }; - "pays_fee=".print(); - match self.pays_fee { - Pays::Yes => "Yes".print(), - Pays::No => "No".print(), - } - } -} - -/// Allows easy conversion from `DispatchError` to `DispatchErrorWithPostInfo` for dispatchables -/// that want to return a custom a posterior weight on error. 
-pub trait WithPostDispatchInfo { - /// Call this on your modules custom errors type in order to return a custom weight on error. - /// - /// # Example - /// - /// ```ignore - /// let who = ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100)))?; - /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); - /// ``` - fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; -} - -impl WithPostDispatchInfo for T -where - T: Into, -{ - fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { - DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { - actual_weight: Some(actual_weight), - pays_fee: Default::default(), - }, - error: self.into(), - } - } -} - -/// Implementation for unchecked extrinsic. -impl GetDispatchInfo - for UncheckedExtrinsic -where - Call: GetDispatchInfo, - Extra: SignedExtension, -{ - fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } -} - -/// Implementation for checked extrinsic. -impl GetDispatchInfo for CheckedExtrinsic -where - Call: GetDispatchInfo, -{ - fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } -} - -/// Implementation for test extrinsic. -#[cfg(feature = "std")] -impl GetDispatchInfo for sp_runtime::testing::TestXt { - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: Weight::from_ref_time(self.encode().len() as _), - pays_fee: Pays::Yes, - ..Default::default() - } - } -} - -/// A struct holding value for each `DispatchClass`. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct PerDispatchClass { - /// Value for `Normal` extrinsics. - normal: T, - /// Value for `Operational` extrinsics. - operational: T, - /// Value for `Mandatory` extrinsics. - mandatory: T, -} - -impl PerDispatchClass { - /// Create new `PerDispatchClass` with the same value for every class. 
- pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { - Self { - normal: val(DispatchClass::Normal), - operational: val(DispatchClass::Operational), - mandatory: val(DispatchClass::Mandatory), - } - } - - /// Get a mutable reference to current value of given class. - pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { - match class { - DispatchClass::Operational => &mut self.operational, - DispatchClass::Normal => &mut self.normal, - DispatchClass::Mandatory => &mut self.mandatory, - } - } - - /// Get current value for given class. - pub fn get(&self, class: DispatchClass) -> &T { - match class { - DispatchClass::Normal => &self.normal, - DispatchClass::Operational => &self.operational, - DispatchClass::Mandatory => &self.mandatory, - } - } -} - -impl PerDispatchClass { - /// Set the value of given class. - pub fn set(&mut self, new: T, class: impl OneOrMany) { - for class in class.into_iter() { - *self.get_mut(class) = new.clone(); - } - } -} - -impl PerDispatchClass { - /// Returns the total weight consumed by all extrinsics in the block. - pub fn total(&self) -> Weight { - let mut sum = Weight::zero(); - for class in DispatchClass::all() { - sum = sum.saturating_add(*self.get(*class)); - } - sum - } - - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); - } - - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would - /// occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(&weight).ok_or(())?; - Ok(()) - } - - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of - /// `Weight`. 
- pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); - } -} - -/// Means of weighing some particular kind of data (`T`). -pub trait WeighData { - /// Weigh the data `T` given by `target`. When implementing this for a dispatchable, `T` will be - /// a tuple of all arguments given to the function (except origin). - fn weigh_data(&self, target: T) -> Weight; -} - -impl WeighData for Weight { - fn weigh_data(&self, _: T) -> Weight { - return *self - } -} - -impl PaysFee for (Weight, DispatchClass, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.2 - } -} - -impl WeighData for (Weight, DispatchClass) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl WeighData for (Weight, DispatchClass, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (Weight, DispatchClass) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (Weight, DispatchClass) { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (Weight, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (Weight, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for (Weight, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.1 - } -} - -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { - let (actual_weight, pays_fee) = post_weight_info; - Self { actual_weight, pays_fee } - } -} - -impl From> for PostDispatchInfo { - fn from(actual_weight: Option) -> Self { - Self { actual_weight, pays_fee: Default::default() } - } -} - -impl ClassifyDispatch for Weight { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for Weight { - fn 
pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl ClassifyDispatch for (Weight, DispatchClass, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -// TODO: Eventually remove these - -impl From> for PostDispatchInfo { - fn from(maybe_actual_computation: Option) -> Self { - let actual_weight = match maybe_actual_computation { - Some(actual_computation) => Some(Weight::zero().set_ref_time(actual_computation)), - None => None, - }; - Self { actual_weight, pays_fee: Default::default() } - } -} - -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { - let (maybe_actual_time, pays_fee) = post_weight_info; - let actual_weight = match maybe_actual_time { - Some(actual_time) => Some(Weight::zero().set_ref_time(actual_time)), - None => None, - }; - Self { actual_weight, pays_fee } - } -} - -impl ClassifyDispatch for u64 { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for u64 { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for u64 { - fn weigh_data(&self, _: T) -> Weight { - return Weight::zero().set_ref_time(*self) - } -} - -impl WeighData for (u64, DispatchClass, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (u64, DispatchClass, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (u64, DispatchClass, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.2 - } -} - -impl WeighData for (u64, DispatchClass) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (u64, DispatchClass) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (u64, DispatchClass) { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (u64, Pays) { - fn weigh_data(&self, args: T) -> Weight { - 
return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (u64, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for (u64, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.1 - } -} - -// END TODO - /// Declares a `Module` struct and a `Call` enum, which implements the dispatch logic. /// /// ## Declaration @@ -650,7 +101,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::dispatch; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module for enum Call where origin: T::Origin { /// /// // Private functions are dispatchable, but not available to other /// // FRAME pallets. @@ -677,7 +128,7 @@ impl PaysFee for (u64, Pays) { /// * `Module`: The struct generated by the macro, with type `Config`. /// * `Call`: The enum generated for every pallet, which implements /// [`Callable`](./dispatch/trait.Callable.html). -/// * `origin`: Alias of `T::RuntimeOrigin`. +/// * `origin`: Alias of `T::Origin`. /// * `Result`: The expected return type from pallet functions. /// /// The first parameter of dispatchable functions must always be `origin`. @@ -693,7 +144,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::dispatch; /// # use frame_system::{Config, ensure_signed}; /// decl_module! 
{ -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_long_function(origin) -> dispatch::DispatchResult { /// // Your implementation @@ -725,18 +176,18 @@ impl PaysFee for (u64, Pays) { /// ``` /// # #[macro_use] /// # extern crate frame_support; -/// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}}; +/// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { -/// ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100_000)))?; +/// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; /// if do_expensive_calc { /// // do the expensive calculation /// // ... /// // return None to indicate that we are using all weight (the default) -/// return Ok(None::.into()); +/// return Ok(None.into()); /// } /// // expensive calculation not executed: use only a portion of the weight /// Ok(Some(100_000).into()) @@ -758,7 +209,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::transactional; /// # use frame_system::Config; /// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// #[transactional] /// fn my_short_function(origin) { @@ -779,7 +230,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::dispatch; /// # use frame_system::{Config, ensure_signed, ensure_root}; /// decl_module! 
{ -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; @@ -819,7 +270,7 @@ impl PaysFee for (u64, Pays) { /// pub trait Config: frame_system::Config {} /// /// decl_module! { -/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { /// // Your implementation /// } /// } @@ -831,7 +282,7 @@ impl PaysFee for (u64, Pays) { /// /// ## Where clause /// -/// Besides the default `origin: T::RuntimeOrigin`, you can also pass other bounds to the module +/// Besides the default `origin: T::Origin`, you can also pass other bounds to the module /// declaration. This where bound will be replicated to all types generated by this macro. The /// chaining of multiple trait bounds with `+` is not supported. If multiple bounds for one type are /// required, it needs to be split up into multiple bounds. @@ -844,7 +295,7 @@ impl PaysFee for (u64, Pays) { /// pub trait Config: system::Config where Self::AccountId: From {} /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin, T::AccountId: From { +/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { /// // Your implementation /// } /// } @@ -855,7 +306,7 @@ impl PaysFee for (u64, Pays) { /// /// The following are reserved function signatures: /// -/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/main-docs/build/events-errors/). +/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/v3/runtime/events-and-errors). /// The default behavior is to call `deposit_event` from the [System /// module](../frame_system/index.html). 
However, you can write your own implementation for events /// in your runtime. To use the default behavior, add `fn deposit_event() = default;` to your @@ -1885,7 +1336,7 @@ macro_rules! decl_module { ) ); }; - // Ignore any ident which is not `origin` with type `T::RuntimeOrigin`. + // Ignore any ident which is not `origin` with type `T::Origin`. (@normalize $(#[$attr:meta])* pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> @@ -1906,7 +1357,7 @@ macro_rules! decl_module { $(#[weight = $weight:expr])? $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( - $origin:ident : T::RuntimeOrigin $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? + $origin:ident : T::Origin $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } $($rest:tt)* ) => { @@ -2046,7 +1497,7 @@ macro_rules! decl_module { { /// Deposits an event using `frame_system::Pallet::deposit_event`. $vis fn deposit_event( - event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::RuntimeEvent> + event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::Event> ) { <$system::Pallet<$trait_instance>>::deposit_event(event.into()) } @@ -2098,35 +1549,6 @@ macro_rules! 
decl_module { {} }; - (@impl_try_state_default - { $system:ident } - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - ) => { - #[cfg(feature = "try-runtime")] - impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> - $crate::traits::TryState<<$trait_instance as $system::Config>::BlockNumber> - for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - { - fn try_state( - _: <$trait_instance as $system::Config>::BlockNumber, - _: $crate::traits::TryStateSelect, - ) -> Result<(), &'static str> { - let pallet_name = << - $trait_instance - as - $system::Config - >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); - $crate::log::debug!( - target: $crate::LOG_TARGET, - "⚠️ pallet {} cannot have try-state because it is using decl_module!", - pallet_name, - ); - Ok(()) - } - } - }; - (@impl_on_runtime_upgrade { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; @@ -2158,12 +1580,12 @@ macro_rules! decl_module { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<$crate::sp_std::vec::Vec, &'static str> { - Ok($crate::sp_std::vec::Vec::new()) + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: $crate::sp_std::vec::Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { Ok(()) } } @@ -2192,16 +1614,16 @@ macro_rules! 
decl_module { pallet_name, ); - $crate::dispatch::Weight::zero() + 0 } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<$crate::sp_std::vec::Vec, &'static str> { - Ok($crate::sp_std::vec::Vec::new()) + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_: $crate::sp_std::vec::Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { Ok(()) } } @@ -2365,11 +1787,9 @@ macro_rules! decl_module { $vis fn $name( $origin: $origin_ty $(, $param: $param_ty )* ) -> $crate::dispatch::DispatchResult { - $crate::storage::with_storage_layer(|| { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - { $( $impl )* } - Ok(()) - }) + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + { $( $impl )* } + Ok(()) } }; @@ -2385,10 +1805,8 @@ macro_rules! decl_module { ) => { $(#[$fn_attr])* $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { - $crate::storage::with_storage_layer(|| { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - $( $impl )* - }) + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + $( $impl )* } }; @@ -2604,13 +2022,6 @@ macro_rules! decl_module { $( $on_initialize )* } - $crate::decl_module! { - @impl_try_state_default - { $system } - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - } - $crate::decl_module! { @impl_on_runtime_upgrade { $system } @@ -2792,7 +2203,7 @@ macro_rules! 
decl_module { for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { fn count() -> usize { 1 } - fn infos() -> $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData> { + fn accumulate(acc: &mut $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData>) { use $crate::traits::PalletInfoAccess; let item = $crate::traits::PalletInfoData { index: Self::index(), @@ -2800,7 +2211,7 @@ macro_rules! decl_module { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - vec![item] + acc.push(item); } } @@ -2906,8 +2317,8 @@ macro_rules! decl_module { impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::UnfilteredDispatchable for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { - type RuntimeOrigin = $origin_type; - fn dispatch_bypass_filter(self, _origin: Self::RuntimeOrigin) -> $crate::dispatch::DispatchResultWithPostInfo { + type Origin = $origin_type; + fn dispatch_bypass_filter(self, _origin: Self::Origin) -> $crate::dispatch::DispatchResultWithPostInfo { match self { $( $call_type::$fn_name { $( $param_name ),* } => { @@ -2925,7 +2336,7 @@ macro_rules! decl_module { impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Callable<$trait_instance> for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { - type RuntimeCall = $call_type<$trait_instance $(, $instance)?>; + type Call = $call_type<$trait_instance $(, $instance)?>; } $crate::__dispatch_impl_metadata! { @@ -3178,14 +2589,13 @@ macro_rules! 
__check_reserved_fn_name { mod tests { use super::*; use crate::{ - dispatch::{DispatchClass, DispatchInfo, Pays}, metadata::*, traits::{ - CallerTrait, CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, - OnInitialize, OnRuntimeUpgrade, PalletInfo, + CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, + OnRuntimeUpgrade, PalletInfo, }, + weights::{DispatchClass, DispatchInfo, Pays, RuntimeDbWeight}, }; - use sp_weights::RuntimeDbWeight; pub trait Config: system::Config + Sized where @@ -3198,9 +2608,9 @@ mod tests { pub trait Config: 'static { type AccountId; - type RuntimeCall; + type Call; type BaseCallFilter; - type RuntimeOrigin: crate::traits::OriginTrait; + type Origin: crate::traits::OriginTrait; type BlockNumber: Into; type PalletInfo: crate::traits::PalletInfo; type DbWeight: Get; @@ -3212,7 +2622,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system = system, T::AccountId: From { + pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { /// Hi, this is a comment. 
#[weight = 0] fn aux_0(_origin) -> DispatchResult { unreachable!() } @@ -3235,13 +2645,13 @@ mod tests { #[weight = (5, DispatchClass::Operational)] fn operational(_origin) { unreachable!() } - fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } Weight::from_ref_time(7) } + fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } fn on_idle(n: T::BlockNumber, remaining_weight: Weight,) -> Weight { - if n.into() == 42 || remaining_weight == Weight::from_ref_time(42) { panic!("on_idle") } - Weight::from_ref_time(7) + if n.into() == 42 || remaining_weight == 42 { panic!("on_idle") } + 7 } fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } - fn on_runtime_upgrade() -> Weight { Weight::from_ref_time(10) } + fn on_runtime_upgrade() -> Weight { 10 } fn offchain_worker() {} /// Some doc fn integrity_test() { panic!("integrity_test") } @@ -3300,18 +2710,8 @@ mod tests { } } - impl CallerTrait<::AccountId> for OuterOrigin { - fn into_system(self) -> Option::AccountId>> { - unimplemented!("Not required in tests!") - } - - fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { - unimplemented!("Not required in tests!") - } - } - impl crate::traits::OriginTrait for OuterOrigin { - type Call = ::RuntimeCall; + type Call = ::Call; type PalletsOrigin = OuterOrigin; type AccountId = ::AccountId; @@ -3335,10 +2735,6 @@ mod tests { unimplemented!("Not required in tests!") } - fn into_caller(self) -> Self::PalletsOrigin { - unimplemented!("Not required in tests!") - } - fn try_with_caller( self, _f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -3358,15 +2754,12 @@ mod tests { fn as_signed(self) -> Option { unimplemented!("Not required in tests!") } - fn as_system_ref(&self) -> Option<&RawOrigin> { - unimplemented!("Not required in tests!") - } } impl system::Config for TraitImpl { - type RuntimeOrigin = OuterOrigin; + type Origin = OuterOrigin; type AccountId = 
u32; - type RuntimeCall = (); + type Call = (); type BaseCallFilter = frame_support::traits::Everything; type BlockNumber = u32; type PalletInfo = Self; @@ -3417,30 +2810,24 @@ mod tests { #[test] fn on_initialize_should_work_2() { - assert_eq!( - as OnInitialize>::on_initialize(10), - Weight::from_ref_time(7) - ); + assert_eq!( as OnInitialize>::on_initialize(10), 7); } #[test] #[should_panic(expected = "on_idle")] fn on_idle_should_work_1() { - as OnIdle>::on_idle(42, Weight::from_ref_time(9)); + as OnIdle>::on_idle(42, 9); } #[test] #[should_panic(expected = "on_idle")] fn on_idle_should_work_2() { - as OnIdle>::on_idle(9, Weight::from_ref_time(42)); + as OnIdle>::on_idle(9, 42); } #[test] fn on_idle_should_work_3() { - assert_eq!( - as OnIdle>::on_idle(10, Weight::from_ref_time(11)), - Weight::from_ref_time(7) - ); + assert_eq!( as OnIdle>::on_idle(10, 11), 7); } #[test] @@ -3452,10 +2839,7 @@ mod tests { #[test] fn on_runtime_upgrade_should_work() { sp_io::TestExternalities::default().execute_with(|| { - assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), - Weight::from_ref_time(10) - ) + assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 10) }); } @@ -3464,20 +2848,12 @@ mod tests { // operational. assert_eq!( Call::::operational {}.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_ref_time(5), - class: DispatchClass::Operational, - pays_fee: Pays::Yes - }, + DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, ); // custom basic assert_eq!( Call::::aux_3 {}.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_ref_time(3), - class: DispatchClass::Normal, - pays_fee: Pays::Yes - }, + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, ); } @@ -3507,187 +2883,3 @@ mod tests { Call::::new_call_variant_aux_0(); } } - -#[cfg(test)] -// Do not complain about unused `dispatch` and `dispatch_aux`. 
-#[allow(dead_code)] -mod weight_tests { - use super::*; - use sp_core::{parameter_types, Get}; - use sp_weights::RuntimeDbWeight; - - pub trait Config: 'static { - type RuntimeOrigin; - type Balance; - type BlockNumber; - type DbWeight: Get; - type PalletInfo: crate::traits::PalletInfo; - } - - pub struct TraitImpl {} - - parameter_types! { - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 100, - write: 1000, - }; - } - - impl Config for TraitImpl { - type RuntimeOrigin = u32; - type BlockNumber = u32; - type Balance = u32; - type DbWeight = DbWeight; - type PalletInfo = crate::tests::PanicPalletInfo; - } - - decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { - // no arguments, fixed weight - #[weight = 1000] - fn f00(_origin) { unimplemented!(); } - - #[weight = (1000, DispatchClass::Mandatory)] - fn f01(_origin) { unimplemented!(); } - - #[weight = (1000, Pays::No)] - fn f02(_origin) { unimplemented!(); } - - #[weight = (1000, DispatchClass::Operational, Pays::No)] - fn f03(_origin) { unimplemented!(); } - - // weight = a x 10 + b - #[weight = ((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes)] - fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } - - #[weight = (0, DispatchClass::Operational, Pays::Yes)] - fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } - - #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_ref_time(10_000)] - fn f20(_origin) { unimplemented!(); } - - #[weight = T::DbWeight::get().reads_writes(6, 5) + Weight::from_ref_time(40_000)] - fn f21(_origin) { unimplemented!(); } - - } - } - - #[test] - fn weights_are_correct() { - // #[weight = 1000] - let info = Call::::f00 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = (1000, DispatchClass::Mandatory)] - let info = Call::::f01 
{}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Mandatory); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = (1000, Pays::No)] - let info = Call::::f02 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::No); - - // #[weight = (1000, DispatchClass::Operational, Pays::No)] - let info = Call::::f03 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Operational); - assert_eq!(info.pays_fee, Pays::No); - - // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] - let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(150)); // 13*10 + 20 - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = (0, DispatchClass::Operational, Pays::Yes)] - let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(0)); - assert_eq!(info.class, DispatchClass::Operational); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] - let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(12300)); // 100*3 + 1000*2 + 10_1000 - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] - let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(45600)); // 100*6 + 1000*5 + 40_1000 - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - } - - #[test] - fn extract_actual_weight_works() { - let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; - 
assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), Weight::from_ref_time(7)); - assert_eq!( - extract_actual_weight(&Ok(Some(1000).into()), &pre), - Weight::from_ref_time(1000) - ); - assert_eq!( - extract_actual_weight( - &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), - &pre - ), - Weight::from_ref_time(9) - ); - } - - #[test] - fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; - assert_eq!( - extract_actual_weight(&Ok(Some(1250).into()), &pre), - Weight::from_ref_time(1000) - ); - assert_eq!( - extract_actual_weight( - &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(1300))), - &pre - ), - Weight::from_ref_time(1000), - ); - } - - #[test] - fn extract_actual_pays_fee_works() { - let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; - assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); - assert_eq!( - extract_actual_pays_fee( - &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), - &pre - ), - Pays::Yes - ); - assert_eq!( - extract_actual_pays_fee( - &Err(DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }, - error: DispatchError::BadOrigin, - }), - &pre - ), - Pays::No - ); - - let pre = DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }; - assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); - assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); - } -} diff --git 
a/frame/support/src/error.rs b/frame/support/src/error.rs index 337bd75895c2c..0ffe4334e2e30 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -51,7 +51,7 @@ pub use sp_runtime::traits::{BadOrigin, LookupError}; /// // exported in the metadata. /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::RuntimeOrigin { +/// pub struct Module for enum Call where origin: T::Origin { /// type Error = MyError; /// /// #[weight = 0] diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 84e416e50544d..8e43df82a284c 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -16,16 +16,6 @@ // limitations under the License. //! Support code for the runtime. -//! -//! ## Note on Tuple Traits -//! -//! Many of the traits defined in [`traits`] have auto-implementations on tuples as well. Usually, -//! the tuple is a function of number of pallets in the runtime. By default, the traits are -//! implemented for tuples of up to 64 items. -// -// If you have more pallets in your runtime, or for any other reason need more, enabled `tuples-96` -// or the `tuples-128` complication flag. Note that these features *will increase* the compilation -// of this crate. #![cfg_attr(not(feature = "std"), no_std)] @@ -93,8 +83,6 @@ pub mod unsigned { }; } -#[cfg(any(feature = "std", feature = "runtime-benchmarks", feature = "try-runtime", test))] -pub use self::storage::storage_noop_guard::StorageNoopGuard; pub use self::{ dispatch::{Callable, Parameter}, hash::{ @@ -283,87 +271,81 @@ pub use frame_support_procedural::storage_alias; macro_rules! 
parameter_types { ( $( #[ $attr:meta ] )* - $vis:vis const $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; + $vis:vis const $name:ident: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name $( - < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) - )?; - $crate::parameter_types!(IMPL_CONST $name , $type , $value $( $(, $ty_params)* )?); + $vis struct $name; + $crate::parameter_types!(IMPL_CONST $name , $type , $value); $crate::parameter_types!( $( $rest )* ); ); ( $( #[ $attr:meta ] )* - $vis:vis $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; + $vis:vis $name:ident: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name $( - < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) - )?; - $crate::parameter_types!(IMPL $name, $type, $value $( $(, $ty_params)* )?); + $vis struct $name; + $crate::parameter_types!(IMPL $name, $type, $value); $crate::parameter_types!( $( $rest )* ); ); ( $( #[ $attr:meta ] )* - $vis:vis storage $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; + $vis:vis storage $name:ident: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name $( - < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) - )?; - $crate::parameter_types!(IMPL_STORAGE $name, $type, $value $( $(, $ty_params)* )?); + $vis struct $name; + $crate::parameter_types!(IMPL_STORAGE $name, $type, $value); $crate::parameter_types!( $( $rest )* ); ); () => (); - (IMPL_CONST $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { - impl< $($ty_params),* > $name< $($ty_params),* > { + (IMPL_CONST $name:ident, $type:ty, $value:expr) => { + impl $name { /// Returns the value of this parameter type. 
pub const fn get() -> $type { $value } } - impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { - fn get() -> _I { - _I::from(Self::get()) + impl> $crate::traits::Get for $name { + fn get() -> I { + I::from(Self::get()) } } - impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { + impl $crate::traits::TypedGet for $name { type Type = $type; fn get() -> $type { Self::get() } } }; - (IMPL $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { - impl< $($ty_params),* > $name< $($ty_params),* > { + (IMPL $name:ident, $type:ty, $value:expr) => { + impl $name { /// Returns the value of this parameter type. pub fn get() -> $type { $value } } - impl<_I: From<$type>, $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { - fn get() -> _I { - _I::from(Self::get()) + impl> $crate::traits::Get for $name { + fn get() -> I { + I::from(Self::get()) } } - impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { + impl $crate::traits::TypedGet for $name { type Type = $type; fn get() -> $type { Self::get() } } }; - (IMPL_STORAGE $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { - #[allow(unused)] - impl< $($ty_params),* > $name< $($ty_params),* > { + (IMPL_STORAGE $name:ident, $type:ty, $value:expr) => { + impl $name { /// Returns the key for this parameter type. + #[allow(unused)] pub fn key() -> [u8; 16] { $crate::sp_core_hashing_proc_macro::twox_128!(b":", $name, b":") } @@ -372,6 +354,7 @@ macro_rules! parameter_types { /// /// This needs to be executed in an externalities provided /// environment. + #[allow(unused)] pub fn set(value: &$type) { $crate::storage::unhashed::put(&Self::key(), value); } @@ -386,13 +369,13 @@ macro_rules! 
parameter_types { } } - impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { - fn get() -> _I { - _I::from(Self::get()) + impl> $crate::traits::Get for $name { + fn get() -> I { + I::from(Self::get()) } } - impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { + impl $crate::traits::TypedGet for $name { type Type = $type; fn get() -> $type { Self::get() @@ -445,23 +428,6 @@ macro_rules! parameter_types_impl_thread_local { pub fn set(t: $type) { [<$name:snake:upper>].with(|v| *v.borrow_mut() = t); } - - /// Mutate the internal value in place. - #[allow(unused)] - pub fn mutate R>(mutate: F) -> R{ - let mut current = Self::get(); - let result = mutate(&mut current); - Self::set(current); - result - } - - /// Get current value and replace with initial value of the parameter type. - #[allow(unused)] - pub fn take() -> $type { - let current = Self::get(); - Self::set($value); - current - } } )* } @@ -744,7 +710,7 @@ macro_rules! assert_err { /// Assert an expression returns an error specified. /// -/// This can be used on `DispatchResultWithPostInfo` when the post info should +/// This can be used on`DispatchResultWithPostInfo` when the post info should /// be ignored. #[macro_export] macro_rules! assert_err_ignore_postinfo { @@ -846,7 +812,7 @@ pub mod tests { pub trait Config: 'static { type BlockNumber: Codec + EncodeLike + Default + TypeInfo; - type RuntimeOrigin; + type Origin; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } @@ -857,7 +823,7 @@ pub mod tests { use super::Config; decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } } @@ -889,7 +855,7 @@ pub mod tests { impl Config for Test { type BlockNumber = u32; - type RuntimeOrigin = u32; + type Origin = u32; type PalletInfo = PanicPalletInfo; type DbWeight = (); } @@ -1377,29 +1343,26 @@ pub mod pallet_prelude { #[cfg(feature = "std")] pub use crate::traits::GenesisBuild; pub use crate::{ - dispatch::{ - DispatchClass, DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter, - Pays, - }, + dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter}, ensure, inherent::{InherentData, InherentIdentifier, ProvideInherent}, storage, storage::{ bounded_vec::BoundedVec, types::{ - CountedStorageMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap, - StorageMap, StorageNMap, StorageValue, ValueQuery, + CountedStorageMap, Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, + StorageNMap, StorageValue, ValueQuery, }, }, traits::{ ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, }, + weights::{DispatchClass, Pays, Weight}, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; pub use codec::{Decode, Encode, MaxEncodedLen}; - pub use frame_support::pallet_macros::*; pub use scale_info::TypeInfo; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, @@ -1411,166 +1374,40 @@ pub mod pallet_prelude { MAX_MODULE_ERROR_ENCODED_SIZE, }; pub use sp_std::marker::PhantomData; - pub use sp_weights::Weight; } -/// The `pallet` attribute macro defines a pallet that can be used with -/// [`construct_runtime!`]. 
It must be attached to a module named `pallet` as follows: +/// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. /// +/// It is define by a module item: /// ```ignore /// #[pallet] /// pub mod pallet { -/// ... +/// ... /// } /// ``` /// -/// Note that various types can be automatically imported using -/// [`frame_support::pallet_prelude`] and `frame_system::pallet_prelude`: +/// Inside the module the macro will parse item with the attribute: `#[pallet::*]`, some +/// attributes are mandatory, some other optional. +/// +/// The attribute are explained with the syntax of non instantiable pallets, to see how pallet +/// with instance work see below example. /// +/// Note various type can be automatically imported using pallet_prelude in frame_support and +/// frame_system: /// ```ignore /// #[pallet] /// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// ... /// } /// ``` /// -/// # pallet::* Attributes -/// -/// The `pallet` macro will parse any items within your `pallet` module that are annotated with -/// `#[pallet::*]` attributes. Some of these attributes are mandatory and some are optional, -/// and they can attach to different types of items within your pallet depending on the -/// attribute in question. 
The full list of `#[pallet::*]` attributes is shown below in the -/// order in which they are mentioned in this document: -/// -/// * [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) -/// * [`pallet::config`](#config-trait-palletconfig-mandatory) -/// * [`pallet::constant`](#palletconstant) -/// * [`pallet::disable_frame_system_supertrait_check`](#disable_supertrait_check) -/// * [`pallet::generate_store($vis trait Store)`](#palletgenerate_storevis-trait-store) -/// * [`pallet::generate_storage_info`](#palletgenerate_storage_info) -/// * [`pallet::storage_version`](#palletstorage_version) -/// * [`pallet::hooks`](#hooks-pallethooks-optional) -/// * [`pallet::call`](#call-palletcall-optional) -/// * [`pallet::weight($expr)`](#palletweightexpr) -/// * [`pallet::compact`](#palletcompact-some_arg-some_type) -/// * [`pallet::call_index($idx)`](#palletcall_indexidx) -/// * [`pallet::extra_constants`](#extra-constants-palletextra_constants-optional) -/// * [`pallet::error`](#error-palleterror-optional) -/// * [`pallet::event`](#event-palletevent-optional) -/// * [`pallet::generate_deposit($visibility fn -/// deposit_event)`](#palletgenerate_depositvisibility-fn-deposit_event) -/// * [`pallet::storage`](#storage-palletstorage-optional) -/// * [`pallet::getter(fn $my_getter_fn_name)`](#palletgetterfn-my_getter_fn_name-optional) -/// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) -/// * [`pallet::unbounded`](#palletunbounded-optional) -/// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) -/// * [`cfg(..)`](#cfg-for-storage) (on storage items) -/// * [`pallet::type_value`](#type-value-pallettype_value-optional) -/// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) -/// * [`pallet::genesis_build`](#genesis-build-palletgenesis_build-optional) -/// * [`pallet::inherent`](#inherent-palletinherent-optional) -/// * 
[`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) -/// * [`pallet::origin`](#origin-palletorigin-optional) -/// -/// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these -/// attributes, ultimately removing their AST nodes before they can be parsed as real -/// attribute macro calls. This means that technically we do not need attribute macro -/// definitions for any of these attributes, however, for consistency and discoverability -/// reasons, we still maintain stub attribute macro definitions for all of these attributes in -/// the [`pallet_macros`] module which is automatically included in all pallets as part of the -/// pallet prelude. The actual "work" for all of these attribute macros can be found in the -/// macro expansion for `#[pallet]`. -/// -/// Also note that in this document, pallet attributes are explained using the syntax of -/// non-instantiable pallets. For an example of an instantiable pallet, see [this -/// example](#example-of-an-instantiable-pallet). -/// -/// # Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` on the `#[pallet]` or `#[frame_support::pallet]` -/// attribute attached to your pallet module will allow you to enable dev mode for a pallet. -/// The aim of dev mode is to loosen some of the restrictions and requirements placed on -/// production pallets for easy tinkering and development. Dev mode pallets should not be used -/// in production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not -/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not -/// specify a weight. -/// * All storages are marked as unbounded, meaning you do not need to implement -/// `MaxEncodedLen` on storage types. 
This is equivalent to specifying `#[pallet::unbounded]` -/// on all storage type definitions. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
-/// -/// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory) -/// -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. -/// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. -/// -/// ## Macro expansion: -/// -/// The macro adds this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: -/// * [`GetStorageVersion`](`traits::GetStorageVersion`) -/// * [`OnGenesis`](`traits::OnGenesis`): contains some logic to write the pallet version into -/// storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. +/// # Config trait: `#[pallet::config]` mandatory /// -/// It implements [`PalletInfoAccess`](`traits::PalletInfoAccess') on `Pallet` to ease access -/// to pallet information given by [`frame_support::traits::PalletInfo`]. (The implementation -/// uses the associated type `frame_system::Config::PalletInfo`). -/// -/// It implements [`StorageInfoTrait`](`traits::StorageInfoTrait`) on `Pallet` which give -/// information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. -/// -/// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for each storage in the implementation of -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet. 
Otherwise it implements -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet using the -/// [`PartialStorageInfoTrait`](`traits::PartialStorageInfoTrait`) implementation of storages. -/// -/// # Config trait: `#[pallet::config]` (mandatory) -/// -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the -/// pallet. -/// -/// Item must be defined as: +/// The trait defining generics of the pallet. /// +/// Item must be defined as /// ```ignore /// #[pallet::config] /// pub trait Config: frame_system::Config + $optionally_some_other_supertraits @@ -1579,103 +1416,74 @@ pub mod pallet_prelude { /// ... /// } /// ``` +/// I.e. a regular trait definition named `Config`, with supertrait `frame_system::Config`, +/// optionally other supertrait and where clause. /// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`frame_support::pallet_macros::event`) must be present if `RuntimeEvent` -/// exists as a config item in your `#[pallet::config]`. -/// -/// Also see [`pallet::config`](`frame_support::pallet_macros::config`) -/// -/// ## `pallet::constant` -/// -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by -/// [`Get`](crate::traits::Get) from [`pallet::config`](#palletconfig) into metadata, e.g.: +/// The associated type `Event` is reserved, if defined it must bounds `From` and +/// `IsType<::Event>`, see `#[pallet::event]` for more +/// information. 
/// +/// To put `Get` associated type into metadatas, use the attribute `#[pallet::constant]`, e.g.: /// ```ignore /// #[pallet::config] /// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; +/// #[pallet::constant] +/// type Foo: Get; /// } /// ``` /// -/// Also see [`pallet::constant`](`frame_support::pallet_macros::constant`) -/// -/// ## `pallet::disable_frame_system_supertrait_check` -///
-/// /// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: -/// +/// `#[pallet::disable_frame_system_supertrait_check]`, e.g.: /// ```ignore /// #[pallet::config] /// #[pallet::disable_frame_system_supertrait_check] /// pub trait Config: pallet_timestamp::Config {} /// ``` /// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. -/// -/// Also see -/// [`pallet::disable_frame_system_supertrait_check`](`frame_support::pallet_macros::disable_frame_system_supertrait_check`) -/// -/// ## Macro expansion: +/// ### Macro expansion: /// -/// The macro expands pallet constant metadata with the information given by +/// The macro expand pallet constant metadata with the information given by /// `#[pallet::constant]`. /// -/// # `pallet::generate_store($vis trait Store)` +/// # Pallet struct placeholder: `#[pallet::pallet]` mandatory /// -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: +/// The placeholder struct, on which is implemented pallet informations. /// +/// Item must be defined as followed: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. +/// +/// To generate a `Store` trait associating all storages, use the attribute +/// `#[pallet::generate_store($vis trait Store)]`, e.g.: /// ```ignore /// #[pallet::pallet] /// #[pallet::generate_store(pub(super) trait Store)] /// pub struct Pallet(_); /// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. +/// More precisely the store trait contains an associated type for each storage. 
It is +/// implemented for `Pallet` allowing to access the storage from pallet struct. /// /// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using /// `::Foo`. /// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. -/// -/// Also see [`pallet::generate_store`](`frame_support::pallet_macros::generate_store`). -/// -/// # `pallet::generate_storage_info` -/// /// To generate the full storage info (used for PoV calculation) use the attribute /// `#[pallet::generate_storage_info]`, e.g.: -/// /// ```ignore /// #[pallet::pallet] /// #[pallet::generate_storage_info] /// pub struct Pallet(_); /// ``` /// -/// This requires all storage items to implement the trait [`traits::StorageInfoTrait`], thus -/// all keys and value types must be bound by [`pallet_prelude::MaxEncodedLen`]. Individual -/// storages can opt-out from this constraint by using `#[pallet::unbounded]` (see -/// `#[pallet::storage]` for more info). +/// This require all storage to implement the trait [`traits::StorageInfoTrait`], thus all keys +/// and value types must bound [`pallet_prelude::MaxEncodedLen`]. +/// Some individual storage can opt-out from this constraint by using `#[pallet::unbounded]`, +/// see `#[pallet::storage]` documentation. /// -/// Also see [`pallet::generate_storage_info`](`frame_support::pallet_macros::generate_storage_info`) -/// -/// # `pallet::storage_version` -/// -/// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro -/// implements [`traits::GetStorageVersion`], the current storage version needs to be -/// communicated to the macro. This can be done by using the `pallet::storage_version` -/// attribute: +/// As the macro implements [`traits::GetStorageVersion`], the current storage version needs to +/// be communicated to the macro. 
This can be done by using the `storage_version` attribute: /// /// ```ignore /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); @@ -1687,34 +1495,75 @@ pub mod pallet_prelude { /// /// If not present, the current storage version is set to the default value. /// -/// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) +/// ### Macro expansion: +/// +/// The macro add this attribute to the struct definition: +/// ```ignore +/// #[derive( +/// frame_support::CloneNoBound, +/// frame_support::EqNoBound, +/// frame_support::PartialEqNoBound, +/// frame_support::RuntimeDebugNoBound, +/// )] +/// ``` +/// and replace the type `_` by `PhantomData`. +/// +/// It implements on pallet: +/// * [`traits::GetStorageVersion`] +/// * [`traits::OnGenesis`]: contains some logic to write pallet version into storage. +/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. +/// +/// It declares `type Module` type alias for `Pallet`, used by [`construct_runtime`]. +/// +/// It implements [`traits::PalletInfoAccess`] on `Pallet` to ease access to pallet +/// informations given by [`frame_support::traits::PalletInfo`]. +/// (The implementation uses the associated type `frame_system::Config::PalletInfo`). +/// +/// It implements [`traits::StorageInfoTrait`] on `Pallet` which give information about all +/// storages. +/// +/// If the attribute generate_store is set then the macro creates the trait `Store` and +/// implements it on `Pallet`. +/// +/// If the attribute set_storage_max_encoded_len is set then the macro call +/// [`traits::StorageInfoTrait`] for each storage in the implementation of +/// [`traits::StorageInfoTrait`] for the pallet. +/// Otherwise it implements [`traits::StorageInfoTrait`] for the pallet using the +/// [`traits::PartialStorageInfoTrait`] implementation of storages. 
/// -/// # Hooks: `#[pallet::hooks]` (optional) +/// # Hooks: `#[pallet::hooks]` optional /// -/// The `pallet::hooks` attribute allows you to specify a `Hooks` implementation for `Pallet` -/// that specifies pallet-specific logic. +/// Implementation of `Hooks` on `Pallet` allowing to define some specific pallet logic. /// -/// The item the attribute attaches to must be defined as follows: +/// Item must be defined as /// ```ignore /// #[pallet::hooks] /// impl Hooks> for Pallet $optional_where_clause { -/// ... /// } /// ``` /// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. +/// `Hooks>` (they are defined in preludes), for the type `Pallet` +/// and with an optional where clause. /// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: +/// If no `#[pallet::hooks]` exists, then a default implementation corresponding to the +/// following code is automatically generated: /// ```ignore /// #[pallet::hooks] /// impl Hooks> for Pallet {} /// ``` /// -/// Also see [`pallet::hooks`](`frame_support::pallet_macros::hooks`) +/// ### Macro expansion: /// -/// # Call: `#[pallet::call]` (optional) +/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, +/// `OffchainWorker`, `IntegrityTest` using `Hooks` implementation. +/// +/// NOTE: OnRuntimeUpgrade is implemented with `Hooks::on_runtime_upgrade` and some additional +/// logic. E.g. logic to write pallet version into storage. +/// +/// NOTE: The macro also adds some tracing logic when implementing the above traits. The +/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +/// +/// # Call: `#[pallet::call]` optional /// /// Implementation of pallet dispatchables. /// @@ -1736,50 +1585,53 @@ pub mod pallet_prelude { /// } /// ``` /// I.e. 
a regular type implementation, with generic `T: Config`, on type `Pallet`, with -/// an optional where clause. -/// -/// ## `#[pallet::weight($expr)]` -/// -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. -/// -/// Also see [`pallet::weight`](`frame_support::pallet_macros::weight`) -/// -/// ### `#[pallet::compact] $some_arg: $some_type` -/// -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`. -/// -/// Also see [`pallet::compact`](`frame_support::pallet_macros::compact`) +/// optional where clause. /// -/// ## `#[pallet::call_index($idx)]` +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, +/// the first argument must be `origin: OriginFor`, compact encoding for argument can be +/// used using `#[pallet::compact]`, function must return `DispatchResultWithPostInfo` or +/// `DispatchResult`. /// /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. +/// which defines and sets the codec index for the dispatchable function in the `Call` enum. /// /// All call indexes start from 0, until it encounters a dispatchable function with a defined /// call index. The dispatchable function that lexically follows the function with a defined /// call index will have that call index, but incremented by 1, e.g. if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. +/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` has +/// a call index of 10, then `fn qux` will have an index of 11, instead of 1. 
+/// +/// All arguments must implement `Debug`, `PartialEq`, `Eq`, `Decode`, `Encode`, `Clone`. For +/// ease of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// +/// If no `#[pallet::call]` exists, then a default implementation corresponding to the +/// following code is automatically generated: +/// ```ignore +/// #[pallet::call] +/// impl Pallet {} +/// ``` /// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the +/// **WARNING**: modifying dispatchables, changing their order, removing some must be done with +/// care. Indeed this will change the outer runtime call type (which is an enum with one +/// variant per pallet), this outer runtime call can be stored on-chain (e.g. in +/// pallet-scheduler). Thus migration might be needed. To mitigate against some of this, the /// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls. +/// that the `Call` enum encoding does not change after modification. /// -/// Also see [`pallet::call_index`](`frame_support::pallet_macros::call_index`) +/// ### Macro expansion /// -/// # Extra constants: `#[pallet::extra_constants]` (optional) +/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: +/// `Clone`, `Eq`, `PartialEq`, `Debug` (with stripped implementation in `not("std")`), +/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `UnfilteredDispatchable`. 
/// -/// Allows you to define some extra constants to be added into constant metadata. +/// The macro implement the `Callable` trait on `Pallet` and a function `call_functions` which +/// returns the dispatchable metadata. /// -/// Item must be defined as: +/// # Extra constants: `#[pallet::extra_constants]` optional +/// +/// Allow to define some extra constants to put into constant metadata. /// +/// Item must be defined as: /// ```ignore /// #[pallet::extra_constants] /// impl Pallet where $optional_where_clause { @@ -1790,23 +1642,19 @@ pub mod pallet_prelude { /// ... /// } /// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. -/// -/// ## Macro expansion +/// I.e. a regular rust implement block with some optional where clause and functions with 0 +/// args, 0 generics, and some return type. /// -/// The macro add some extra constants to pallet constant metadata. +/// ### Macro expansion /// -/// Also see: [`pallet::extra_constants`](`frame_support::pallet_macros::extra_constants`) +/// The macro add some extra constant to pallet constant metadata. /// -/// # Error: `#[pallet::error]` (optional) +/// # Error: `#[pallet::error]` optional /// -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. +/// Allow to define an error type to be return from dispatchable on error. +/// This error type informations are put into metadata. /// /// Item must be defined as: -/// /// ```ignore /// #[pallet::error] /// pub enum Error { @@ -1817,7 +1665,7 @@ pub mod pallet_prelude { /// ... /// } /// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field +/// I.e. a regular rust enum named `Error`, with generic `T` and fieldless or multiple-field /// variants. 
/// /// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to be @@ -1825,28 +1673,29 @@ pub mod pallet_prelude { /// preferably 1 byte in size in order to reduce storage size. The error enum itself has an /// absolute maximum encoded size specified by [`MAX_MODULE_ERROR_ENCODED_SIZE`]. /// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// /// Field types in enum variants must also implement [`PalletError`](traits::PalletError), /// otherwise the pallet will fail to compile. Rust primitive types have already implemented /// the [`PalletError`](traits::PalletError) trait along with some commonly used stdlib types -/// such as [`Option`] and [`PhantomData`](`frame_support::dispatch::marker::PhantomData`), and -/// hence in most use cases, a manual implementation is not necessary and is discouraged. +/// such as `Option` and `PhantomData`, and hence in most use cases, a manual implementation is +/// not necessary and is discouraged. +/// +/// The generic `T` mustn't bound anything and where clause is not allowed. But bounds and +/// where clause shouldn't be needed for any usecase. /// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. +/// ### Macro expansion /// -/// Also see: [`pallet::error`](`frame_support::pallet_macros::error`) +/// The macro implements `Debug` trait and functions `as_u8` using variant position, and +/// `as_str` using variant doc. /// -/// # Event: `#[pallet::event]` (optional) +/// The macro implements `From>` for `&'static str`. +/// The macro implements `From>` for `DispatchError`. /// -/// Allows you to define pallet events. 
Pallet events are stored under the `system` / `events` -/// key when the block is applied (and then replaced when the next block writes it's events). +/// # Event: `#[pallet::event]` optional /// -/// The Event enum must be defined as follows: +/// Allow to define pallet events, pallet events are stored in the block when they deposited +/// (and removed in next block). /// +/// Item is defined as: /// ```ignore /// #[pallet::event] /// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional @@ -1856,148 +1705,117 @@ pub mod pallet_prelude { /// ... /// } /// ``` +/// I.e. an enum (with named or unnamed fields variant), named Event, with generic: none or `T` +/// or `T: Config`, and optional where clause. /// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. +/// Each field must implement `Clone`, `Eq`, `PartialEq`, `Encode`, `Decode`, and `Debug` (on +/// std only). +/// For ease of use, bound the trait `Member` available in frame_support::pallet_prelude. /// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`Encode`], [`Decode`], and -/// [`Debug`] (on std only). For ease of use, bound by the trait -/// [`Member`](`frame_support::pallet_prelude::Member`), available in -/// frame_support::pallet_prelude. +/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generate a helper +/// function on `Pallet` to deposit event. /// -/// Also see [`pallet::event`](`frame_support::pallet_macros::event`) +/// NOTE: For instantiable pallet, event must be generic over T and I. /// -/// ## `#[pallet::generate_deposit($visibility fn deposit_event)]` +/// ### Macro expansion: /// -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. 
+/// Macro will add on enum `Event` the attributes: +/// * `#[derive(frame_support::CloneNoBound)]`, +/// * `#[derive(frame_support::EqNoBound)]`, +/// * `#[derive(frame_support::PartialEqNoBound)]`, +/// * `#[derive(codec::Encode)]`, +/// * `#[derive(codec::Decode)]`, +/// * `#[derive(frame_support::RuntimeDebugNoBound)]` /// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. +/// Macro implements `From>` for (). /// -/// Also see [`pallet::generate_deposit`](`frame_support::pallet_macros::generate_deposit`) +/// Macro implements metadata function on `Event` returning the `EventMetadata`. /// -/// # Storage: `#[pallet::storage]` (optional) +/// If `#[pallet::generate_deposit]` then macro implement `fn deposit_event` on `Pallet`. /// -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. +/// # Storage: `#[pallet::storage]` optional /// -/// Item should be defined as: +/// Allow to define some abstract storage inside runtime storage and also set its metadata. +/// This attribute can be used multiple times. /// +/// Item is defined as: /// ```ignore /// #[pallet::storage] /// #[pallet::getter(fn $getter_name)] // optional /// $vis type $StorageName<$some_generic> $optional_where_clause /// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; /// ``` -/// -/// or with unnamed generic: -/// +/// or with unnamed generic /// ```ignore /// #[pallet::storage] /// #[pallet::getter(fn $getter_name)] // optional /// $vis type $StorageName<$some_generic> $optional_where_clause /// = $StorageType<_, $some_generics, ...>; /// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. 
The aliased type must be -/// one of [`StorageValue`](`pallet_prelude::StorageValue`), -/// [`StorageMap`](`pallet_prelude::StorageMap`) or -/// [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`). The generic arguments of the -/// storage type can be given in two manners: named and unnamed. For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * [`StorageValue`](`pallet_prelude::StorageValue`) expects `Value` and optionally +/// I.e. it must be a type alias, with generics: `T` or `T: Config`, aliased type must be one +/// of `StorageValue`, `StorageMap` or `StorageDoubleMap` (defined in frame_support). +/// The generic arguments of the storage type can be given in two manner: named and unnamed. +/// For named generic argument: the name for each argument is the one as define on the storage +/// struct: +/// * [`pallet_prelude::StorageValue`] expect `Value` and optionally `QueryKind` and `OnEmpty`, +/// * [`pallet_prelude::StorageMap`] expect `Hasher`, `Key`, `Value` and optionally `QueryKind` +/// and `OnEmpty`, +/// * [`pallet_prelude::CountedStorageMap`] expect `Hasher`, `Key`, `Value` and optionally /// `QueryKind` and `OnEmpty`, -/// * [`StorageMap`](`pallet_prelude::StorageMap`) expects `Hasher`, `Key`, `Value` and -/// optionally `QueryKind` and `OnEmpty`, -/// * [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) expects `Hasher`, `Key`, -/// `Value` and optionally `QueryKind` and `OnEmpty`, -/// * [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`) expects `Hasher1`, `Key1`, -/// `Hasher2`, `Key2`, `Value` and optionally `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. 
if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. -/// -/// For the [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) variant, the `Prefix` -/// also implements -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`). -/// It also associates a [`CounterPrefix`](`pallet_prelude::CounterPrefix'), which is -/// implemented the same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. -/// if runtime names the pallet "MyExample" then the storage `type Foo = -/// CountedStorageaMap<...>` will store its counter at the prefix: `Twox128(b"MyExample") ++ -/// Twox128(b"CounterForFoo")`. +/// * [`pallet_prelude::StorageDoubleMap`] expect `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` +/// and optionally `QueryKind` and `OnEmpty`. /// -/// E.g: +/// For unnamed generic argument: Their first generic must be `_` as it is replaced by the +/// macro and other generic must declared as a normal declaration of type generic in rust. +/// +/// The Prefix generic written by the macro is generated using +/// `PalletInfo::name::>()` and the name of the storage type. +/// E.g. if runtime names the pallet "MyExample" then the storage `type Foo = ...` use the +/// prefix: `Twox128(b"MyExample") ++ Twox128(b"Foo")`. +/// +/// For the `CountedStorageMap` variant, the Prefix also implements +/// `CountedStorageMapInstance`. It associate a `CounterPrefix`, which is implemented same as +/// above, but the storage prefix is prepend with `"CounterFor"`. +/// E.g. if runtime names the pallet "MyExample" then the storage +/// `type Foo = CountedStorageaMap<...>` will store its counter at the prefix: +/// `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. 
/// +/// E.g: /// ```ignore /// #[pallet::storage] /// pub(super) type MyStorage = StorageMap; /// ``` +/// In this case the final prefix used by the map is +/// `Twox128(b"MyExample") ++ Twox128(b"OtherName")`. /// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. -/// -/// Also see [`pallet::storage`](`frame_support::pallet_macros::storage`) -/// -/// ## `#[pallet::getter(fn $my_getter_fn_name)]` (optional) -/// -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a +/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows to define a /// getter function on `Pallet`. /// -/// Also see [`pallet::getter`](`frame_support::pallet_macros::getter`) -/// -/// ## `#[pallet::storage_prefix = "SomeName"]` (optional) -/// -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use, see how `Prefix` generic is implemented above. This is helpful if -/// you wish to rename the storage field but don't want to perform a migration. +/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allow to define the storage +/// prefix to use, see how `Prefix` generic is implemented above. /// /// E.g: -/// /// ```ignore /// #[pallet::storage] /// #[pallet::storage_prefix = "foo"] /// #[pallet::getter(fn my_storage)] /// pub(super) type MyStorage = StorageMap; /// ``` -/// /// or -/// /// ```ignore /// #[pallet::storage] /// #[pallet::getter(fn my_storage)] /// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; /// ``` /// -/// Also see [`pallet::storage_prefix`](`frame_support::pallet_macros::storage_prefix`) -/// -/// ## `#[pallet::unbounded]` (optional) +/// The optional attribute `#[pallet::unbounded]` allows to declare the storage as unbounded. 
+/// When implementating the storage info (when `#[pallet::generate_storage_info]` is specified +/// on the pallet struct placeholder), the size of the storage will be declared as unbounded. +/// This can be useful for storage which can never go into PoV (Proof of Validity). /// -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). -/// -/// Also see [`pallet::unbounded`](`frame_support::pallet_macros::unbounded`) -/// -/// ## `#[pallet::whitelist_storage]` (optional) -/// -/// The optional attribute `#[pallet::whitelist_storage]` will declare the storage as -/// whitelisted from benchmarking. -/// -/// See -/// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) -/// for more info. -/// -/// ## `#[cfg(..)]` (for storage) /// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. /// /// E.g: -/// /// ```ignore /// #[cfg(feature = "my-feature")] /// #[pallet::storage] @@ -2007,24 +1825,6 @@ pub mod pallet_prelude { /// All the `cfg` attributes are automatically copied to the items generated for the storage, /// i.e. the getter, storage prefix, and the metadata element etc. /// -/// Any type placed as the `QueryKind` parameter must implement -/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this -/// trait by default: -/// -/// 1. [`OptionQuery`](`frame_support::storage::types::OptionQuery`), the default `QueryKind` -/// used when this type parameter is omitted. 
Specifying this as the `QueryKind` would cause -/// storage map APIs that return a `QueryKind` to instead return an [`Option`], returning -/// `Some` when a value does exist under a specified storage key, and `None` otherwise. -/// 2. [`ValueQuery`](`frame_support::storage::types::ValueQuery`) causes storage map APIs that -/// return a `QueryKind` to instead return the value type. In cases where a value does not -/// exist under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is -/// used to return an appropriate value. -/// 3. [`ResultQuery`](`frame_support::storage::types::ResultQuery`) causes storage map APIs -/// that return a `QueryKind` to instead return a `Result`, with `T` being the value -/// type and `E` being the pallet error type specified by the `#[pallet::error]` attribute. -/// In cases where a value does not exist under a specified storage key, an `Err` with the -/// specified pallet error variant is returned. -/// /// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some /// type alias then the generation of the getter might fail. In this case the getter can be /// implemented manually. @@ -2033,62 +1833,60 @@ pub mod pallet_prelude { /// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the /// storage item. Thus generic hasher is supported. /// -/// ## Macro expansion +/// ### Macro expansion /// /// For each storage item the macro generates a struct named /// `_GeneratedPrefixForStorage$NameOfStorage`, and implements /// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It -/// then uses it as the first generic of the aliased type. For -/// [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`), -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`) -/// is implemented, and another similar struct is generated. +/// then uses it as the first generic of the aliased type. 
+/// For `CountedStorageMap`, `CountedStorageMapInstance` is implemented, and another similar +/// struct is generated. /// -/// For a named generic, the macro will reorder the generics, and remove the names. +/// For named generic, the macro will reorder the generics, and remove the names. /// -/// The macro implements the function `storage_metadata` on the `Pallet` implementing the -/// metadata for all storage items based on their kind: +/// The macro implements the function `storage_metadata` on `Pallet` implementing the metadata +/// for all storage items based on their kind: /// * for a storage value, the type of the value is copied into the metadata /// * for a storage map, the type of the values and the key's type is copied into the metadata -/// * for a storage double map, the type of the values, and the types of `key1` and `key2` are +/// * for a storage double map, the type of the values, and the types of key1 and key2 are /// copied into the metadata. /// -/// # Type value: `#[pallet::type_value]` (optional) +/// # Type value: `#[pallet::type_value]` optional /// -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the -/// [`Get`](crate::traits::Get) trait to ease use of storage types. This attribute is meant to -/// be used alongside [`#[pallet::storage]`](#storage-palletstorage-optional) to define a -/// storage's default value. This attribute can be used multiple times. -/// -/// Item must be defined as: +/// Helper to define a struct implementing `Get` trait. To ease use of storage types. +/// This attribute can be used multiple time. /// +/// Item is defined as /// ```ignore /// #[pallet::type_value] /// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } /// ``` -/// /// I.e.: a function definition with generics none or `T: Config` and a returned type. 
/// /// E.g.: -/// /// ```ignore /// #[pallet::type_value] /// fn MyDefault() -> T::Balance { 3.into() } /// ``` /// -/// Also see [`pallet::type_value`](`frame_support::pallet_macros::type_value`) +/// NOTE: This attribute is meant to be used alongside `#[pallet::storage]` to defined some +/// specific default value in storage. /// -/// # Genesis config: `#[pallet::genesis_config]` (optional) +/// ### Macro expansion /// -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. +/// Macro renames the function to some internal name, generate a struct with the original name +/// of the function and its generic, and implement `Get<$ReturnType>` by calling the user +/// defined function. /// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait [`GenesisBuild`](`traits::GenesisBuild`) with -/// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type -/// generics are constrained to be either none, or `T` or `T: Config`. +/// # Genesis config: `#[pallet::genesis_config]` optional /// -/// E.g: +/// Allow to define the genesis configuration of the pallet. /// +/// Item is defined as either an enum or a struct. +/// It needs to be public and implement trait GenesisBuild with `#[pallet::genesis_build]`. +/// The type generics is constrained to be either none, or `T` or `T: Config`. 
+/// +/// E.g: /// ```ignore /// #[pallet::genesis_config] /// pub struct GenesisConfig { @@ -2096,28 +1894,31 @@ pub mod pallet_prelude { /// } /// ``` /// -/// Also see [`pallet::genesis_config`](`frame_support::pallet_macros::genesis_config`) +/// ### Macro expansion /// -/// # Genesis build: `#[pallet::genesis_build]` (optional) +/// Macro will add the following attribute on it: +/// * `#[cfg(feature = "std")]` +/// * `#[derive(Serialize, Deserialize)]` +/// * `#[serde(rename_all = "camelCase")]` +/// * `#[serde(deny_unknown_fields)]` +/// * `#[serde(bound(serialize = ""))]` +/// * `#[serde(bound(deserialize = ""))]` /// -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the -/// pallet's initial state. +/// # Genesis build: `#[pallet::genesis_build]` optional /// -/// The impl must be defined as: +/// Allow to define how genesis_configuration is built. /// +/// Item is defined as /// ```ignore /// #[pallet::genesis_build] /// impl GenesisBuild for GenesisConfig<$maybe_generics> { /// fn build(&self) { $expr } /// } /// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on +/// I.e. a rust trait implementation with generic `T: Config`, of trait `GenesisBuild` on /// type `GenesisConfig` with generics none or `T`. 
/// /// E.g.: -/// /// ```ignore /// #[pallet::genesis_build] /// impl GenesisBuild for GenesisConfig { @@ -2125,93 +1926,87 @@ pub mod pallet_prelude { /// } /// ``` /// -/// Also see [`pallet::genesis_build`](`frame_support::pallet_macros::genesis_build`) +/// ### Macro expansion /// -/// # Inherent: `#[pallet::inherent]` (optional) +/// Macro will add the following attribute on it: +/// * `#[cfg(feature = "std")]` /// -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. +/// Macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as second generic +/// for non-instantiable pallets. /// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. +/// # Inherent: `#[pallet::inherent]` optional /// -/// Item must be defined as: +/// Allow the pallet to provide some inherent: /// +/// Item is defined as: /// ```ignore /// #[pallet::inherent] /// impl ProvideInherent for Pallet { /// // ... regular trait implementation /// } /// ``` +/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type +/// `Pallet`, and some optional where clause. /// -/// I.e. a trait implementation with bound `T: Config`, of trait -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) for type `Pallet`, and some -/// optional where clause. 
+/// ### Macro expansion /// -/// Also see [`pallet::inherent`](`frame_support::pallet_macros::inherent`) +/// Macro make currently no use of this information, but it might use this information in the +/// future to give information directly to construct_runtime. /// -/// # Validate unsigned: `#[pallet::validate_unsigned]` (optional) +/// # Validate unsigned: `#[pallet::validate_unsigned]` optional /// -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: +/// Allow the pallet to validate some unsigned transaction: /// +/// Item is defined as: /// ```ignore /// #[pallet::validate_unsigned] /// impl ValidateUnsigned for Pallet { /// // ... regular trait implementation /// } /// ``` +/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type +/// `Pallet`, and some optional where clause. /// -/// I.e. a trait implementation with bound `T: Config`, of trait -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) for type `Pallet`, and some -/// optional where clause. +/// NOTE: There is also `sp_runtime::traits::SignedExtension` that can be used to add some +/// specific logic for transaction validation. /// -/// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used to -/// add some specific logic for transaction validation. +/// ### Macro expansion /// -/// Also see [`pallet::validate_unsigned`](`frame_support::pallet_macros::validate_unsigned`) +/// Macro make currently no use of this information, but it might use this information in the +/// future to give information directly to construct_runtime. /// -/// # Origin: `#[pallet::origin]` (optional) +/// # Origin: `#[pallet::origin]` optional /// -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. +/// Allow to define some origin for the pallet. /// -/// Item must be either a type alias, an enum, or a struct. 
It needs to be public. +/// Item must be either a type alias or an enum or a struct. It needs to be public. /// /// E.g.: -/// /// ```ignore /// #[pallet::origin] /// pub struct Origin(PhantomData<(T)>); /// ``` /// /// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care +/// can be stored on-chain (e.g. in pallet-scheduler), thus any change must be done with care /// as it might require some migration. /// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +/// NOTE: for instantiable pallet, origin must be generic over T and I. /// -/// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`) +/// # General notes on instantiable pallet /// -/// # General notes on instantiable pallets +/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allow runtime +/// to implement multiple instance of the pallet, by using different type for the generic. +/// This is the sole purpose of the generic `I`. +/// But because `PalletInfo` requires `Pallet` placeholder to be static it is important to +/// bound `'static` whenever `PalletInfo` can be used. +/// And in order to have instantiable pallet usable as a regular pallet without instance, it is +/// important to bound `= ()` on every types. /// -/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allows -/// runtime to implement multiple instances of the pallet, by using different types for the -/// generic. This is the sole purpose of the generic `I`, but because -/// [`PalletInfo`](`traits::PalletInfo`) requires the `Pallet` placeholder to be static, it is -/// important to bound by `'static` whenever [`PalletInfo`](`traits::PalletInfo`) can be used. 
-/// Additionally, in order to make an instantiable pallet usable as a regular pallet without an -/// instance, it is important to bound by `= ()` on every type. -/// -/// Thus impl bound looks like `impl, I: 'static>`, and types look like +/// Thus impl bound look like `impl, I: 'static>`, and types look like /// `SomeType` or `SomeType, I: 'static = ()>`. /// -/// # Example of a non-instantiable pallet +/// # Example for pallet without instance. /// /// ``` /// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` @@ -2233,7 +2028,7 @@ pub mod pallet_prelude { /// #[pallet::constant] // put the constant in metadata /// type MyGetParam: Get; /// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; +/// type Event: From> + IsType<::Event>; /// } /// /// // Define some additional constant to put into the constant metadata. @@ -2405,7 +2200,7 @@ pub mod pallet_prelude { /// } /// ``` /// -/// # Example of an instantiable pallet +/// # Example for pallet with instance. /// /// ``` /// pub use pallet::*; @@ -2422,7 +2217,7 @@ pub mod pallet_prelude { /// #[pallet::constant] /// type MyGetParam: Get; /// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; +/// type Event: From> + IsType<::Event>; /// } /// /// #[pallet::extra_constants] @@ -2536,28 +2331,28 @@ pub mod pallet_prelude { /// } /// ``` /// -/// # Upgrade guidelines +/// ## Upgrade guidelines: /// /// 1. Export the metadata of the pallet for later checks /// - run your node with the pallet active /// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p /// > meta.json` -/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the -/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p -/// my_pallet`. 
This template can be used as it contains all information for storages, -/// genesis config and genesis build. -/// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros, -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one -/// file. Suggested order: -/// * `Config`, -/// * `decl_module`, -/// * `decl_event`, -/// * `decl_error`, -/// * `decl_storage`, -/// * `origin`, -/// * `validate_unsigned`, -/// * `provide_inherent`, so far it should compile and all be correct. +/// 2. generate the template upgrade for the pallet provided by decl_storage +/// with environment variable `PRINT_PALLET_UPGRADE`: +/// `PRINT_PALLET_UPGRADE=1 cargo check -p my_pallet` This template can be +/// used as information it contains all information for storages, genesis +/// config and genesis build. +/// 3. reorganize pallet to have trait `Config`, `decl_*` macros, `ValidateUnsigned`, +/// `ProvideInherent`, `Origin` all together in one file. Suggested order: +/// * Config, +/// * decl_module, +/// * decl_event, +/// * decl_error, +/// * decl_storage, +/// * origin, +/// * validate_unsigned, +/// * provide_inherent, +/// so far it should compile and all be correct. /// 4. start writing the new pallet module /// ```ignore /// pub use pallet::*; @@ -2577,17 +2372,16 @@ pub mod pallet_prelude { /// } /// ``` /// 5. **migrate Config**: move trait into the module with -/// * all const in `decl_module` to [`#[pallet::constant]`](#palletconstant) -/// * add the bound `IsType<::RuntimeEvent>` to `type -/// RuntimeEvent` +/// * all const in decl_module to `#[pallet::constant]` +/// * add bound `IsType<::Event>` to `type Event` /// 7. **migrate decl_module**: write: /// ```ignore /// #[pallet::hooks] /// impl Hooks for Pallet { /// } /// ``` -/// and write inside `on_initialize`, `on_finalize`, `on_runtime_upgrade`, -/// `offchain_worker`, and `integrity_test`. 
+/// and write inside +/// `on_initialize`, `on_finalize`, `on_runtime_upgrade`, `offchain_worker`, `integrity_test`. /// /// then write: /// ```ignore @@ -2595,26 +2389,25 @@ pub mod pallet_prelude { /// impl Pallet { /// } /// ``` -/// and write inside all the calls in `decl_module` with a few changes in the signature: -/// - origin must now be written completely, e.g. `origin: OriginFor` -/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you -/// might need to put `Ok(().into())` at the end or the function. -/// - `#[compact]` must now be written -/// [`#[pallet::compact]`](#palletcompact-some_arg-some_type) -/// - `#[weight = ..]` must now be written [`#[pallet::weight(..)]`](#palletweightexpr) -/// -/// 7. **migrate event**: rewrite as a simple enum with the attribute -/// [`#[pallet::event]`](#event-palletevent-optional), use [`#[pallet::generate_deposit($vis -/// fn deposit_event)]`](#event-palletevent-optional) to generate `deposit_event`, -/// 8. **migrate error**: rewrite it with attribute -/// [`#[pallet::error]`](#error-palleterror-optional). -/// 9. **migrate storage**: `decl_storage` provide an upgrade template (see 3.). All storages, -/// genesis config, genesis build and default implementation of genesis config can be -/// taken from it directly. -/// -/// Otherwise here is the manual process: -/// -/// first migrate the genesis logic. write: +/// and write inside all the calls in decl_module with a few changes in the signature: +/// - origin must now be written completely, e.g. `origin: OriginFor` +/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you +/// might +/// need to put `Ok(().into())` at the end or the function. +/// - `#[compact]` must now be written `#[pallet::compact]` +/// - `#[weight = ..]` must now be written `#[pallet::weight(..)]` +/// +/// 7. 
**migrate event**: +/// rewrite as a simple enum under with the attribute `#[pallet::event]`, +/// use `#[pallet::generate_deposit($vis fn deposit_event)]` to generate deposit_event, +/// 8. **migrate error**: rewrite it with attribute `#[pallet::error]`. +/// 9. **migrate storage**: +/// decl_storage provide an upgrade template (see 3.). All storages, genesis config, genesis +/// build and default implementation of genesis config can be taken from it directly. +/// +/// Otherwise here is the manual process: +/// +/// first migrate the genesis logic. write: /// ```ignore /// #[pallet::genesis_config] /// struct GenesisConfig { @@ -2632,85 +2425,79 @@ pub mod pallet_prelude { /// } /// } /// ``` -/// for each storage, if it contains `config(..)` then add fields, and make it default to -/// the value in `= ..;` or the type default if none, if it contains no build then also add -/// the logic to build the value. for each storage if it contains `build(..)` then add the -/// logic to `genesis_build`. -/// -/// NOTE: within `decl_storage`: the individual config is executed first, followed by the -/// build and finally the `add_extra_genesis` build. -/// -/// Once this is done you can migrate storages individually, a few notes: -/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, -/// - for storages with `get(fn ..)` use [`#[pallet::getter(fn -/// ...)]`](#palletgetterfn-my_getter_fn_name-optional) -/// - for storages with value being `Option<$something>` make generic `Value` being -/// `$something` and generic `QueryKind` being `OptionQuery` (note: this is default). -/// Otherwise make `Value` the complete value type and `QueryKind` being `ValueQuery`. -/// - for storages with default value: `= $expr;` provide some specific `OnEmpty` generic. -/// To do so use of `#[pallet::type_value]` to generate the wanted struct to put. 
-/// example: `MyStorage: u32 = 3u32` would be written: -/// -/// ```ignore +/// for each storages, if it contains config(..) then add a fields, and make its default to the +/// value in `= ..;` or the type default if none, if it contains no build then also add the +/// logic to build the value. +/// for each storages if it contains build(..) then add the logic to genesis_build. +/// +/// NOTE: in decl_storage: is executed first the individual config and build and at the end the +/// add_extra_genesis build +/// +/// Once this is done you can migrate storage individually, a few notes: +/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, +/// - for storage with `get(fn ..)` use `#[pallet::getter(fn ...)]` +/// - for storage with value being `Option<$something>` make generic `Value` being +/// `$something` +/// and generic `QueryKind` being `OptionQuery` (note: this is default). Otherwise make +/// `Value` the complete value type and `QueryKind` being `ValueQuery`. +/// - for storage with default value: `= $expr;` provide some specific OnEmpty generic. To do +/// so +/// use of `#[pallet::type_value]` to generate the wanted struct to put. +/// example: `MyStorage: u32 = 3u32` would be written: +/// ```ignore /// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 } /// #[pallet::storage] /// pub(super) type MyStorage = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>; /// ``` /// -/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and -/// `build_storage` directly on `GenesisConfig`, and these are sometimes used in tests. -/// In order not to break they can be implemented manually, one can implement those -/// functions by calling the `GenesisBuild` implementation. -/// 10. **migrate origin**: move the origin to the pallet module to be under a -/// [`#[pallet::origin]`](#origin-palletorigin-optional) attribute -/// 11. 
**migrate validate_unsigned**: move the -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) implementation to the pallet -/// module under a -/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// attribute -/// 12. **migrate provide_inherent**: move the -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) implementation to the pallet -/// module under a [`#[pallet::inherent]`](#inherent-palletinherent-optional) attribute +/// NOTE: `decl_storage` also generates functions `assimilate_storage` and `build_storage` +/// directly on GenesisConfig, those are sometimes used in tests. In order not to break they +/// can be implemented manually, one can implement those functions by calling `GenesisBuild` +/// implementation. +/// +/// 10. **migrate origin**: move the origin to the pallet module under `#[pallet::origin]` +/// 11. **migrate validate_unsigned**: move the `ValidateUnsigned` implementation to the pallet +/// module under `#[pallet::validate_unsigned]` +/// 12. **migrate provide_inherent**: move the `ProvideInherent` implementation to the pallet +/// module under `#[pallet::inherent]` /// 13. rename the usage of `Module` to `Pallet` inside the crate. -/// 14. migration is done, now double check the migration with the checking migration -/// guidelines shown below. +/// 14. migration is done, now double check migration with the checking migration guidelines. /// -/// # Checking upgrade guidelines: +/// ## Checking upgrade guidelines: /// /// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata -/// and do a diff of the resulting json before and after migration. This checks for: -/// * call, names, signature, docs -/// * event names, docs -/// * error names, docs -/// * storage names, hasher, prefixes, default value -/// * error, error, constant +/// and do a diff of the resulting json before and after migration. 
This checks for: +/// * call, names, signature, docs +/// * event names, docs +/// * error names, docs +/// * storage names, hasher, prefixes, default value +/// * error , error, constant, /// * manually check that: -/// * `Origin` was moved inside the macro under -/// [`#[pallet::origin]`](#origin-palletorigin-optional) if it exists -/// * [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) was moved inside the macro -/// under -/// [`#[pallet::validate_unsigned)]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// if it exists -/// * [`ProvideInherent`](`pallet_prelude::ProvideInherent`) was moved inside the macro -/// under [`#[pallet::inherent)]`](#inherent-palletinherent-optional) if it exists -/// * `on_initialize` / `on_finalize` / `on_runtime_upgrade` / `offchain_worker` were moved -/// to the `Hooks` implementation -/// * storages with `config(..)` were converted to `GenesisConfig` field, and their default -/// is `= $expr;` if the storage has a default value -/// * storages with `build($expr)` or `config(..)` were built in `GenesisBuild::build` -/// * `add_extra_genesis` fields were converted to `GenesisConfig` field with their correct -/// default if specified -/// * `add_extra_genesis` build was written into `GenesisBuild::build` +/// * `Origin` is moved inside the macro under `#[pallet::origin]` if it exists +/// * `ValidateUnsigned` is moved inside the macro under `#[pallet::validate_unsigned)]` if it +/// exists +/// * `ProvideInherent` is moved inside macro under `#[pallet::inherent)]` if it exists +/// * `on_initialize`/`on_finalize`/`on_runtime_upgrade`/`offchain_worker` are moved to +/// `Hooks` +/// implementation +/// * storages with `config(..)` are converted to `GenesisConfig` field, and their default is +/// `= $expr;` if the storage have default value +/// * storages with `build($expr)` or `config(..)` are built in `GenesisBuild::build` +/// * `add_extra_genesis` fields are converted to `GenesisConfig` field with their correct 
+/// default if specified +/// * `add_extra_genesis` build is written into `GenesisBuild::build` /// * storage items defined with [`pallet`] use the name of the pallet provided by -/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used -/// the `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). Thus -/// a runtime using the pallet must be careful with this change. To handle this change: -/// * either ensure that the name of the pallet given to `construct_runtime!` is the same -/// as the name the pallet was giving to `decl_storage`, -/// * or do a storage migration from the old prefix used to the new prefix used. -/// -/// NOTE: The prefixes used by storage items are in metadata. Thus, ensuring the metadata -/// hasn't changed ensures that the `pallet_prefix`s used by the storage items haven't changed. +/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used the +/// `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). +/// Thus a runtime using the pallet must be careful with this change. +/// To handle this change: +/// * either ensure that the name of the pallet given to `construct_runtime!` is the same +/// as the name the pallet was giving to `decl_storage`, +/// * or do a storage migration from the old prefix used to the new prefix used. +/// +/// NOTE: The prefixes used by storage items are in the metadata. Thus, ensuring the metadata +/// hasn't changed does ensure that the `pallet_prefix`s used by the storage items haven't +/// changed. /// /// # Notes when macro fails to show proper error message spans: /// @@ -2725,13 +2512,3 @@ pub mod pallet_prelude { /// ``` /// * use the newest nightly possible. 
pub use frame_support_procedural::pallet; - -/// Contains macro stubs for all of the pallet:: macros -pub mod pallet_macros { - pub use frame_support_procedural::{ - call_index, compact, config, constant, disable_frame_system_supertrait_check, error, event, - extra_constants, generate_deposit, generate_storage_info, generate_store, genesis_build, - genesis_config, getter, hooks, inherent, origin, storage, storage_prefix, storage_version, - type_value, unbounded, validate_unsigned, weight, whitelist_storage, - }; -} diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index 63679fd56d667..05833e0515c07 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -19,7 +19,6 @@ use crate::{ traits::{GetStorageVersion, PalletInfoAccess}, weights::{RuntimeDbWeight, Weight}, }; -use impl_trait_for_tuples::impl_for_tuples; /// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration. pub trait PalletVersionToStorageVersionHelper { @@ -43,12 +42,10 @@ impl PalletVersionToStorageVersionHelpe } } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(30)] impl PalletVersionToStorageVersionHelper for T { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { - let mut weight = Weight::zero(); + let mut weight: Weight = 0; for_tuples!( #( weight = weight.saturating_add(T::migrate(db_weight)); )* ); diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index c95dcee9d7e5c..4ffe32651a9cc 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -512,14 +512,14 @@ mod test_iterators { use codec::{Decode, Encode}; pub trait Config: 'static { - 
type RuntimeOrigin; + type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index f6c8eaa270bb3..d190145ea4c00 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -354,14 +354,14 @@ mod test_iterators { use codec::{Decode, Encode}; pub trait Config: 'static { - type RuntimeOrigin; + type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 334e9b9e24e86..ca893f44b3cb0 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -47,21 +47,21 @@ mod tests { struct Runtime; pub trait Config: 'static { - type RuntimeOrigin; + type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } impl Config for Runtime { - type RuntimeOrigin = u32; + type Origin = u32; type BlockNumber = u32; type PalletInfo = crate::tests::PanicPalletInfo; type DbWeight = (); } decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } crate::decl_storage! 
{ diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 79f3d72044e28..f1d0f9a5f0801 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -460,14 +460,14 @@ mod test_iterators { use codec::{Decode, Encode}; pub trait Config: 'static { - type RuntimeOrigin; + type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 333f4382557b1..d9e50a1e1345e 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -27,7 +27,7 @@ use crate::{ use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; use sp_core::storage::ChildInfo; use sp_runtime::generic::{Digest, DigestItem}; -use sp_std::{collections::btree_set::BTreeSet, marker::PhantomData, prelude::*}; +use sp_std::{marker::PhantomData, prelude::*}; pub use self::{ transactional::{ @@ -46,7 +46,6 @@ pub mod child; pub mod generator; pub mod hashed; pub mod migration; -pub mod storage_noop_guard; pub mod transactional; pub mod types; pub mod unhashed; @@ -1303,7 +1302,6 @@ mod private { impl Sealed for WeakBoundedVec {} impl Sealed for bounded_btree_map::BoundedBTreeMap {} impl Sealed for bounded_btree_set::BoundedBTreeSet {} - impl Sealed for BTreeSet {} macro_rules! impl_sealed_for_tuple { ($($elem:ident),+) => { @@ -1336,9 +1334,6 @@ mod private { impl StorageAppend for Vec {} impl StorageDecodeLength for Vec {} -impl StorageAppend for BTreeSet {} -impl StorageDecodeLength for BTreeSet {} - /// We abuse the fact that SCALE does not put any marker into the encoding, i.e. 
we only encode the /// internal vec and we can append to this vec. We have a test that ensures that if the `Digest` /// format ever changes, we need to remove this here. @@ -1836,22 +1831,4 @@ mod test { ); }); } - - #[crate::storage_alias] - type FooSet = StorageValue>; - - #[test] - fn btree_set_append_and_decode_len_works() { - TestExternalities::default().execute_with(|| { - let btree = BTreeSet::from([1, 2, 3]); - FooSet::put(btree); - - FooSet::append(4); - FooSet::append(5); - FooSet::append(6); - FooSet::append(7); - - assert_eq!(FooSet::decode_len().unwrap(), 7); - }); - } } diff --git a/frame/support/src/storage/storage_noop_guard.rs b/frame/support/src/storage/storage_noop_guard.rs deleted file mode 100644 index 7186c3eaf467a..0000000000000 --- a/frame/support/src/storage/storage_noop_guard.rs +++ /dev/null @@ -1,114 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Feature gated since it can panic. -#![cfg(any(feature = "std", feature = "runtime-benchmarks", feature = "try-runtime", test))] - -//! Contains the [`crate::StorageNoopGuard`] for conveniently asserting -//! that no storage mutation has been made by a whole code block. - -/// Asserts that no storage changes took place between con- and destruction of [`Self`]. 
-/// -/// This is easier than wrapping the whole code-block inside a `assert_storage_noop!`. -/// -/// # Example -/// -/// ```should_panic -/// use frame_support::{StorageNoopGuard, storage::unhashed::put}; -/// -/// sp_io::TestExternalities::default().execute_with(|| { -/// let _guard = frame_support::StorageNoopGuard::default(); -/// put(b"key", b"value"); -/// // Panics since there are storage changes. -/// }); -/// ``` -#[must_use] -pub struct StorageNoopGuard(sp_std::vec::Vec); - -impl Default for StorageNoopGuard { - fn default() -> Self { - Self(frame_support::storage_root(frame_support::StateVersion::V1)) - } -} - -impl Drop for StorageNoopGuard { - fn drop(&mut self) { - // No need to double panic, eg. inside a test assertion failure. - if sp_std::thread::panicking() { - return - } - assert_eq!( - frame_support::storage_root(frame_support::StateVersion::V1), - self.0, - "StorageNoopGuard detected wrongful storage changes.", - ); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_io::TestExternalities; - - #[test] - #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] - fn storage_noop_guard_panics_on_changed() { - TestExternalities::default().execute_with(|| { - let _guard = StorageNoopGuard::default(); - frame_support::storage::unhashed::put(b"key", b"value"); - }); - } - - #[test] - fn storage_noop_guard_works_on_unchanged() { - TestExternalities::default().execute_with(|| { - let _guard = StorageNoopGuard::default(); - frame_support::storage::unhashed::put(b"key", b"value"); - frame_support::storage::unhashed::kill(b"key"); - }); - } - - #[test] - #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] - fn storage_noop_guard_panics_on_early_drop() { - TestExternalities::default().execute_with(|| { - let guard = StorageNoopGuard::default(); - frame_support::storage::unhashed::put(b"key", b"value"); - sp_std::mem::drop(guard); - frame_support::storage::unhashed::kill(b"key"); - }); - } - - 
#[test] - fn storage_noop_guard_works_on_changed_forget() { - TestExternalities::default().execute_with(|| { - let guard = StorageNoopGuard::default(); - frame_support::storage::unhashed::put(b"key", b"value"); - sp_std::mem::forget(guard); - }); - } - - #[test] - #[should_panic(expected = "Something else")] - fn storage_noop_guard_does_not_double_panic() { - TestExternalities::default().execute_with(|| { - let _guard = StorageNoopGuard::default(); - frame_support::storage::unhashed::put(b"key", b"value"); - panic!("Something else"); - }); - } -} diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index 8c19434767f49..c4027acfe7232 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -143,7 +143,10 @@ where } /// Store a value to be associated with the given key from the map. - pub fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { + pub fn insert + Clone, ValArg: EncodeLike>( + key: KeyArg, + val: ValArg, + ) { if !::Map::contains_key(Ref::from(&key)) { CounterFor::::mutate(|value| value.saturating_inc()); } @@ -151,7 +154,7 @@ where } /// Remove the value under a key. - pub fn remove>(key: KeyArg) { + pub fn remove + Clone>(key: KeyArg) { if ::Map::contains_key(Ref::from(&key)) { CounterFor::::mutate(|value| value.saturating_dec()); } @@ -159,7 +162,7 @@ where } /// Mutate the value under a key. - pub fn mutate, R, F: FnOnce(&mut QueryKind::Query) -> R>( + pub fn mutate + Clone, R, F: FnOnce(&mut QueryKind::Query) -> R>( key: KeyArg, f: F, ) -> R { @@ -170,7 +173,7 @@ where /// Mutate the item, only if an `Ok` value is returned. pub fn try_mutate(key: KeyArg, f: F) -> Result where - KeyArg: EncodeLike, + KeyArg: EncodeLike + Clone, F: FnOnce(&mut QueryKind::Query) -> Result, { Self::try_mutate_exists(key, |option_value_ref| { @@ -184,7 +187,7 @@ where } /// Mutate the value under a key. Deletes the item if mutated to a `None`. 
- pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + pub fn mutate_exists + Clone, R, F: FnOnce(&mut Option) -> R>( key: KeyArg, f: F, ) -> R { @@ -197,7 +200,7 @@ where /// or if the storage item does not exist (`None`), independent of the `QueryType`. pub fn try_mutate_exists(key: KeyArg, f: F) -> Result where - KeyArg: EncodeLike, + KeyArg: EncodeLike + Clone, F: FnOnce(&mut Option) -> Result, { ::Map::try_mutate_exists(key, |option_value| { @@ -219,7 +222,7 @@ where } /// Take the value under a key. - pub fn take>(key: KeyArg) -> QueryKind::Query { + pub fn take + Clone>(key: KeyArg) -> QueryKind::Query { let removed_value = ::Map::mutate_exists(key, |value| value.take()); if removed_value.is_some() { CounterFor::::mutate(|value| value.saturating_dec()); @@ -237,7 +240,7 @@ where /// `[item]`. Any default value set for the storage item will be ignored on overwrite. pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) where - EncodeLikeKey: EncodeLike, + EncodeLikeKey: EncodeLike + Clone, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageAppend, @@ -352,7 +355,7 @@ where /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. 
pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> where - KArg: EncodeLike, + KArg: EncodeLike + Clone, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageTryAppend, diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 9ba4cf052e82a..2e090d30119aa 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -25,7 +25,6 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, - StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -92,10 +91,10 @@ impl Key2: MaxEncodedLen, { fn get() -> u32 { - // The `max_len` of both key hashes plus the pallet prefix and storage prefix (which both - // are hashed with `Twox128`). - let z = - Hasher1::max_len::() + Hasher2::max_len::() + Twox128::max_len::<()>() * 2; + let z = Hasher1::max_len::() + + Hasher2::max_len::() + + Prefix::pallet_prefix().len() + + Prefix::STORAGE_PREFIX.len(); z as u32 } } @@ -756,16 +755,6 @@ mod test { } } - #[test] - fn keylenof_works() { - // Works with Blake2_128Concat and Twox64Concat. 
- type A = StorageDoubleMap; - let size = 16 * 2 // Two Twox128 - + 16 + 8 // Blake2_128Concat = hash + key - + 8 + 4; // Twox64Concat = hash + key - assert_eq!(KeyLenOf::::get(), size); - } - #[test] fn test() { type A = diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index 0f89e2378a55d..f4ac83c22663b 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -25,7 +25,6 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, - StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -62,9 +61,8 @@ where Key: FullCodec + MaxEncodedLen, { fn get() -> u32 { - // The `max_len` of the key hash plus the pallet prefix and storage prefix (which both are - // hashed with `Twox128`). - let z = Hasher::max_len::() + Twox128::max_len::<()>() * 2; + let z = + Hasher::max_len::() + Prefix::pallet_prefix().len() + Prefix::STORAGE_PREFIX.len(); z as u32 } } @@ -503,27 +501,6 @@ mod test { } } - #[test] - fn keylenof_works() { - // Works with Blake2_128Concat. - type A = StorageMap; - let size = 16 * 2 // Two Twox128 - + 16 + 4; // Blake2_128Concat = hash + key - assert_eq!(KeyLenOf::::get(), size); - - // Works with Blake2_256. - type B = StorageMap; - let size = 16 * 2 // Two Twox128 - + 32; // Blake2_256 - assert_eq!(KeyLenOf::::get(), size); - - // Works with Twox64Concat. 
- type C = StorageMap; - let size = 16 * 2 // Two Twox128 - + 8 + 4; // Twox64Concat = hash + key - assert_eq!(KeyLenOf::::get(), size); - } - #[test] fn test() { type A = StorageMap; diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs index f87da5de12274..0706e9fb377e2 100644 --- a/frame/support/src/storage/types/mod.rs +++ b/frame/support/src/storage/types/mod.rs @@ -42,11 +42,9 @@ pub use value::StorageValue; /// Trait implementing how the storage optional value is converted into the queried type. /// /// It is implemented by: -/// * `OptionQuery` which converts an optional value to an optional value, used when querying -/// storage returns an optional value. -/// * `ResultQuery` which converts an optional value to a result value, used when querying storage -/// returns a result value. -/// * `ValueQuery` which converts an optional value to a value, used when querying storage returns a +/// * `OptionQuery` which convert an optional value to an optional value, user when querying storage +/// will get an optional value. +/// * `ValueQuery` which convert an optional value to a value, user when querying storage will get a /// value. pub trait QueryKindTrait { /// Metadata for the storage kind. 
@@ -87,30 +85,6 @@ where } } -/// Implement QueryKindTrait with query being `Result` -pub struct ResultQuery(sp_std::marker::PhantomData); -impl QueryKindTrait for ResultQuery -where - Value: FullCodec + 'static, - Error: FullCodec + 'static, - OnEmpty: crate::traits::Get>, -{ - const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; - - type Query = Result; - - fn from_optional_value_to_query(v: Option) -> Self::Query { - match v { - Some(v) => Ok(v), - None => OnEmpty::get(), - } - } - - fn from_query_to_optional_value(v: Self::Query) -> Option { - v.ok() - } -} - /// Implement QueryKindTrait with query being `Value` pub struct ValueQuery; impl QueryKindTrait for ValueQuery diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index f09b715a970ad..72d6d6682f14a 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -50,20 +50,18 @@ mod error; pub use error::PalletError; mod filter; -pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter}; +pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter, IntegrityTest}; mod misc; pub use misc::{ defensive_prelude::{self, *}, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, - ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, - DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, - ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, - Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, - TryCollect, TryDrop, TypedGet, UnixTime, WrapperKeepOpaque, WrapperOpaque, + ConstU32, ConstU64, ConstU8, DefensiveSaturating, EnsureInherentsAreFirst, EqualPrivilegeOnly, + EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, + IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PreimageProvider, + 
PreimageRecipient, PrivilegeCmp, SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, + WrapperKeepOpaque, WrapperOpaque, }; -#[allow(deprecated)] -pub use misc::{PreimageProvider, PreimageRecipient}; #[doc(hidden)] pub use misc::{DEFENSIVE_OP_INTERNAL_ERROR, DEFENSIVE_OP_PUBLIC_ERROR}; @@ -83,24 +81,23 @@ mod hooks; #[cfg(feature = "std")] pub use hooks::GenesisBuild; pub use hooks::{ - Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, - OnTimestampSet, + Hooks, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, }; +#[cfg(feature = "try-runtime")] +pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; pub mod schedule; mod storage; pub use storage::{ Instance, PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, StorageInstance, - TrackedStorageKey, WhitelistedStorageKeys, }; mod dispatch; #[allow(deprecated)] pub use dispatch::EnsureOneOf; pub use dispatch::{ - AsEnsureOriginWithArg, CallerTrait, EitherOf, EitherOfDiverse, EnsureOrigin, - EnsureOriginWithArg, MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, - UnfilteredDispatchable, + AsEnsureOriginWithArg, EitherOf, EitherOfDiverse, EnsureOrigin, EnsureOriginWithArg, + MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, UnfilteredDispatchable, }; mod voting; @@ -108,11 +105,3 @@ pub use voting::{ ClassCountOf, CurrencyToVote, PollStatus, Polling, SaturatingCurrencyToVote, U128CurrencyToVote, VoteTally, }; - -mod preimages; -pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, QueryPreimage, StorePreimage}; - -#[cfg(feature = "try-runtime")] -mod try_runtime; -#[cfg(feature = "try-runtime")] -pub use try_runtime::{Select as TryStateSelect, TryState}; diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index b96cfae4500e2..afc819aa454e5 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -229,32 +229,24 @@ impl< /// 
Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by /// `construct_runtime`. pub trait UnfilteredDispatchable { - /// The origin type of the runtime, (i.e. `frame_system::Config::RuntimeOrigin`). - type RuntimeOrigin; + /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). + type Origin; /// Dispatch this call but do not check the filter in origin. - fn dispatch_bypass_filter(self, origin: Self::RuntimeOrigin) -> DispatchResultWithPostInfo; + fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo; } -/// The trait implemented by the overarching enumeration of the different pallets' origins. -/// Unlike `OriginTrait` impls, this does not include any kind of dispatch/call filter. Also, this -/// trait is more flexible in terms of how it can be used: it is a `Parameter` and `Member`, so it -/// can be used as dispatchable parameters as well as in storage items. -pub trait CallerTrait: Parameter + Member + From> { - /// Extract the signer from the message if it is a `Signed` origin. - fn into_system(self) -> Option>; - - /// Extract a reference to the system-level `RawOrigin` if it is that. - fn as_system_ref(&self) -> Option<&RawOrigin>; -} - -/// Methods available on `frame_system::Config::RuntimeOrigin`. +/// Methods available on `frame_system::Config::Origin`. pub trait OriginTrait: Sized { /// Runtime call type, as in `frame_system::Config::Call` type Call; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: Into + CallerTrait + MaxEncodedLen; + type PalletsOrigin: Parameter + + Member + + Into + + From> + + MaxEncodedLen; /// The AccountId used across the system. type AccountId; @@ -274,12 +266,9 @@ pub trait OriginTrait: Sized { /// For root origin caller, the filters are bypassed and true is returned. fn filter_call(&self, call: &Self::Call) -> bool; - /// Get a reference to the caller (`CallerTrait` impl). + /// Get the caller. 
fn caller(&self) -> &Self::PalletsOrigin; - /// Consume `self` and return the caller. - fn into_caller(self) -> Self::PalletsOrigin; - /// Do something with the caller, consuming self but returning it if the caller was unused. fn try_with_caller( self, @@ -296,20 +285,7 @@ pub trait OriginTrait: Sized { fn signed(by: Self::AccountId) -> Self; /// Extract the signer from the message if it is a `Signed` origin. - fn as_signed(self) -> Option { - self.into_caller().into_system().and_then(|s| { - if let RawOrigin::Signed(who) = s { - Some(who) - } else { - None - } - }) - } - - /// Extract a reference to the sytsem origin, if that's what the caller is. - fn as_system_ref(&self) -> Option<&RawOrigin> { - self.caller().as_system_ref() - } + fn as_signed(self) -> Option; } #[cfg(test)] diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs index cdd82a3124e63..95e5954184b4b 100644 --- a/frame/support/src/traits/filter.rs +++ b/frame/support/src/traits/filter.rs @@ -180,6 +180,17 @@ macro_rules! impl_filter_stack { } } +/// Type that provide some integrity tests. +/// +/// This implemented for modules by `decl_module`. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait IntegrityTest { + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. + fn integrity_test() {} +} + #[cfg(test)] pub mod test_impl_filter_stack { use super::*; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 3415682c0b382..385db4e4d1ad9 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -17,13 +17,9 @@ //! Traits for hooking tasks to events in a blockchain's lifecycle. 
-use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; +use sp_arithmetic::traits::Saturating; use sp_runtime::traits::AtLeast32BitUnsigned; -use sp_std::prelude::*; - -#[cfg(all(feature = "try-runtime", test))] -use codec::{Decode, Encode}; /// The block initialization trait. /// @@ -37,17 +33,15 @@ pub trait OnInitialize { /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, /// including inherent extrinsics. Hence for instance, if you runtime includes /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. - fn on_initialize(_n: BlockNumber) -> Weight { - Weight::zero() + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + 0 } } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_for_tuples(30)] impl OnInitialize for Tuple { - fn on_initialize(n: BlockNumber) -> Weight { - let mut weight = Weight::zero(); + fn on_initialize(n: BlockNumber) -> crate::weights::Weight { + let mut weight = 0; for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); weight } @@ -56,9 +50,7 @@ impl OnInitialize for Tuple { /// The block finalization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is ending. -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_for_tuples(30)] pub trait OnFinalize { /// The block is being finalized. Implement to have something happen. 
/// @@ -79,27 +71,30 @@ pub trait OnIdle { /// /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - /// in a block are applied but before `on_finalize` is executed. - fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { - Weight::zero() + fn on_idle( + _n: BlockNumber, + _remaining_weight: crate::weights::Weight, + ) -> crate::weights::Weight { + 0 } } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_for_tuples(30)] impl OnIdle for Tuple { - fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { - let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = - &[for_tuples!( #( Tuple::on_idle ),* )]; - let mut weight = Weight::zero(); + fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { + let on_idle_functions: &[fn( + BlockNumber, + crate::weights::Weight, + ) -> crate::weights::Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; + let mut weight = 0; let len = on_idle_functions.len(); let start_index = n % (len as u32).into(); let start_index = start_index.try_into().ok().expect( "`start_index % len` always fits into `usize`, because `len` can be in maximum `usize::MAX`; qed" ); - for on_idle_fn in on_idle_functions.iter().cycle().skip(start_index).take(len) { + for on_idle in on_idle_functions.iter().cycle().skip(start_index).take(len) { let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); - weight = weight.saturating_add(on_idle_fn(n, adjusted_remaining_weight)); + weight = weight.saturating_add(on_idle(n, adjusted_remaining_weight)); } weight } @@ -110,14 +105,53 @@ impl OnIdle for Tuple { /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. 
It will be called in an externalities provided environment and /// will see the genesis state after all pallets have written their genesis state. -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_for_tuples(30)] pub trait OnGenesis { /// Something that should happen at genesis. fn on_genesis() {} } +/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. +#[cfg(feature = "try-runtime")] +pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; + +/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. +#[cfg(feature = "try-runtime")] +pub trait OnRuntimeUpgradeHelpersExt { + /// Generate a storage key unique to this runtime upgrade. + /// + /// This can be used to communicate data from pre-upgrade to post-upgrade state and check + /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. + #[cfg(feature = "try-runtime")] + fn storage_key(ident: &str) -> [u8; 32] { + crate::storage::storage_prefix(ON_RUNTIME_UPGRADE_PREFIX, ident.as_bytes()) + } + + /// Get temporary storage data written by [`Self::set_temp_storage`]. + /// + /// Returns `None` if either the data is unavailable or un-decodable. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being read from. + #[cfg(feature = "try-runtime")] + fn get_temp_storage(at: &str) -> Option { + sp_io::storage::get(&Self::storage_key(at)) + .and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok()) + } + + /// Write some temporary data to a specific storage that can be read (potentially in + /// post-upgrade hook) via [`Self::get_temp_storage`]. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being written + /// to. 
+ #[cfg(feature = "try-runtime")] + fn set_temp_storage(data: T, at: &str) { + sp_io::storage::set(&Self::storage_key(at), &data.encode()); + } +} + +#[cfg(feature = "try-runtime")] +impl OnRuntimeUpgradeHelpersExt for U {} + /// The runtime upgrade trait. /// /// Implementing this lets you express what should happen when the runtime upgrades, @@ -132,100 +166,50 @@ pub trait OnRuntimeUpgrade { /// block local data are not accessible. /// /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> Weight { - Weight::zero() + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 } /// Execute some pre-checks prior to a runtime upgrade. /// - /// Return a `Vec` that can contain arbitrary encoded data (usually some pre-upgrade state), - /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector - /// should be returned if there is no such need. - /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - Ok(Vec::new()) + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) } /// Execute some post-checks after a runtime upgrade. /// - /// The `state` parameter is the `Vec` returned by `pre_upgrade` before upgrading, which - /// can be used for post-check. NOTE: if `pre_upgrade` is not implemented an empty vector will - /// be passed in, in such case `post_upgrade` should ignore it. - /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
#[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { Ok(()) } } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_for_tuples(30)] impl OnRuntimeUpgrade for Tuple { - #[cfg(not(feature = "try-runtime"))] - fn on_runtime_upgrade() -> Weight { - let mut weight = Weight::zero(); + fn on_runtime_upgrade() -> crate::weights::Weight { + let mut weight = 0; for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); weight } #[cfg(feature = "try-runtime")] - /// We are executing pre- and post-checks sequentially in order to be able to test several - /// consecutive migrations for the same pallet without errors. Therefore pre and post upgrade - /// hooks for tuples are a noop. - fn on_runtime_upgrade() -> Weight { - use scale_info::prelude::format; - - let mut weight = Weight::zero(); - // migration index in the tuple, start with 1 for better readability - let mut i = 1; - for_tuples!( #( - let _guard = frame_support::StorageNoopGuard::default(); - // we want to panic if any checks fail right here right now. - let state = Tuple::pre_upgrade().expect(&format!("PreUpgrade failed for migration #{}", i)); - drop(_guard); - - weight = weight.saturating_add(Tuple::on_runtime_upgrade()); - - let _guard = frame_support::StorageNoopGuard::default(); - // we want to panic if any checks fail right here right now. 
- Tuple::post_upgrade(state).expect(&format!("PostUpgrade failed for migration #{}", i)); - drop(_guard); - - i += 1; - )* ); - weight + fn pre_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); + result } #[cfg(feature = "try-runtime")] - /// noop - fn pre_upgrade() -> Result, &'static str> { - Ok(Vec::new()) - } - - #[cfg(feature = "try-runtime")] - /// noop - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - Ok(()) + fn post_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); + result } } -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - /// The pallet hooks trait. Implementing this lets you express some logic to execute. pub trait Hooks { /// The block is being finalized. Implement to have something happen. @@ -236,15 +220,18 @@ pub trait Hooks { /// Will not fire if the remaining weight is 0. /// Return the weight used, the hook will subtract it from current weight used /// and pass the result to the next `on_idle` hook if it exists. - fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { - Weight::zero() + fn on_idle( + _n: BlockNumber, + _remaining_weight: crate::weights::Weight, + ) -> crate::weights::Weight { + 0 } /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. 
- fn on_initialize(_n: BlockNumber) -> Weight { - Weight::zero() + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + 0 } /// Perform a module upgrade. @@ -266,40 +253,23 @@ pub trait Hooks { /// pallet is discouraged and might get deprecated in the future. Alternatively, export the same /// logic as a free-function from your pallet, and pass it to `type Executive` from the /// top-level runtime. - fn on_runtime_upgrade() -> Weight { - Weight::zero() - } - - /// Execute the sanity checks of this pallet, per block. - /// - /// It should focus on certain checks to ensure that the state is sensible. This is never - /// executed in a consensus code-path, therefore it can consume as much weight as it needs. - #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumber) -> Result<(), &'static str> { - Ok(()) + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 } /// Execute some pre-checks prior to a runtime upgrade. /// - /// Return a `Vec` that can contain arbitrary encoded data (usually some pre-upgrade state), - /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector - /// should be returned if there is no such need. - /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - Ok(Vec::new()) + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) } /// Execute some post-checks after a runtime upgrade. /// - /// The `state` parameter is the `Vec` returned by `pre_upgrade` before upgrading, which - /// can be used for post-check. NOTE: if `pre_upgrade` is not implemented an empty vector will - /// be passed in, in such case `post_upgrade` should ignore it. - /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
#[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + fn post_upgrade() -> Result<(), &'static str> { Ok(()) } @@ -351,9 +321,7 @@ pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeD } /// A trait which is called when the timestamp is set in the runtime. -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_for_tuples(30)] pub trait OnTimestampSet { /// Called when the timestamp is set. fn on_timestamp_set(moment: Moment); @@ -365,24 +333,20 @@ mod tests { #[test] fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { - use sp_io::TestExternalities; struct Test; - impl OnInitialize for Test { - fn on_initialize(_n: u8) -> Weight { - Weight::from_ref_time(10) + fn on_initialize(_n: u8) -> crate::weights::Weight { + 10 } } impl OnRuntimeUpgrade for Test { - fn on_runtime_upgrade() -> Weight { - Weight::from_ref_time(20) + fn on_runtime_upgrade() -> crate::weights::Weight { + 20 } } - TestExternalities::default().execute_with(|| { - assert_eq!(<(Test, Test)>::on_initialize(0), Weight::from_ref_time(20)); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), Weight::from_ref_time(40)); - }); + assert_eq!(<(Test, Test)>::on_initialize(0), 20); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); } #[test] @@ -394,156 +358,50 @@ mod tests { struct Test3; type TestTuple = (Test1, Test2, Test3); impl OnIdle for Test1 { - fn on_idle(_n: u32, _weight: Weight) -> Weight { + fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test1"); } - Weight::zero() + 0 } } impl OnIdle for Test2 { - fn on_idle(_n: u32, _weight: Weight) -> Weight { + fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { unsafe { 
ON_IDLE_INVOCATION_ORDER.push("Test2"); } - Weight::zero() + 0 } } impl OnIdle for Test3 { - fn on_idle(_n: u32, _weight: Weight) -> Weight { + fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test3"); } - Weight::zero() + 0 } } unsafe { - TestTuple::on_idle(0, Weight::zero()); + TestTuple::on_idle(0, 0); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(1, Weight::zero()); + TestTuple::on_idle(1, 0); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(2, Weight::zero()); + TestTuple::on_idle(2, 0); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test3", "Test1", "Test2"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(3, Weight::zero()); + TestTuple::on_idle(3, 0); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(4, Weight::zero()); + TestTuple::on_idle(4, 0); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); } } - - #[cfg(feature = "try-runtime")] - #[test] - #[allow(dead_code)] - fn on_runtime_upgrade_tuple() { - use frame_support::parameter_types; - use sp_io::TestExternalities; - - struct Test1; - struct Test2; - struct Test3; - - parameter_types! 
{ - static Test1Assertions: u8 = 0; - static Test2Assertions: u8 = 0; - static Test3Assertions: u8 = 0; - static EnableSequentialTest: bool = false; - static SequentialAssertions: u8 = 0; - } - - impl OnRuntimeUpgrade for Test1 { - fn pre_upgrade() -> Result, &'static str> { - Ok("Test1".encode()) - } - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let s: String = Decode::decode(&mut state.as_slice()).unwrap(); - Test1Assertions::mutate(|val| *val += 1); - if EnableSequentialTest::get() { - SequentialAssertions::mutate(|val| *val += 1); - } - assert_eq!(s, "Test1"); - Ok(()) - } - } - - impl OnRuntimeUpgrade for Test2 { - fn pre_upgrade() -> Result, &'static str> { - Ok(100u32.encode()) - } - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let s: u32 = Decode::decode(&mut state.as_slice()).unwrap(); - Test2Assertions::mutate(|val| *val += 1); - if EnableSequentialTest::get() { - assert_eq!(SequentialAssertions::get(), 1); - SequentialAssertions::mutate(|val| *val += 1); - } - assert_eq!(s, 100); - Ok(()) - } - } - - impl OnRuntimeUpgrade for Test3 { - fn pre_upgrade() -> Result, &'static str> { - Ok(true.encode()) - } - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let s: bool = Decode::decode(&mut state.as_slice()).unwrap(); - Test3Assertions::mutate(|val| *val += 1); - if EnableSequentialTest::get() { - assert_eq!(SequentialAssertions::get(), 2); - SequentialAssertions::mutate(|val| *val += 1); - } - assert_eq!(s, true); - Ok(()) - } - } - - TestExternalities::default().execute_with(|| { - type TestEmpty = (); - let origin_state = ::pre_upgrade().unwrap(); - assert!(origin_state.is_empty()); - ::post_upgrade(origin_state).unwrap(); - - type Test1Tuple = (Test1,); - let origin_state = ::pre_upgrade().unwrap(); - assert!(origin_state.is_empty()); - ::post_upgrade(origin_state).unwrap(); - assert_eq!(Test1Assertions::get(), 0); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - - type Test321 = (Test3, 
Test2, Test1); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - assert_eq!(Test2Assertions::take(), 1); - assert_eq!(Test3Assertions::take(), 1); - - // enable sequential tests - EnableSequentialTest::mutate(|val| *val = true); - - type Test123 = (Test1, Test2, Test3); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - assert_eq!(Test2Assertions::take(), 1); - assert_eq!(Test3Assertions::take(), 1); - - // reset assertions - SequentialAssertions::take(); - - type TestNested123 = (Test1, (Test2, Test3)); - ::on_runtime_upgrade(); - assert_eq!(Test1Assertions::take(), 1); - assert_eq!(Test2Assertions::take(), 1); - assert_eq!(Test3Assertions::take(), 1); - }); - } } diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs index daf2d3aa6517d..8c69a2aaccb33 100644 --- a/frame/support/src/traits/members.rs +++ b/frame/support/src/traits/members.rs @@ -17,7 +17,6 @@ //! Traits for dealing with the idea of membership. -use impl_trait_for_tuples::impl_for_tuples; use sp_std::{marker::PhantomData, prelude::*}; /// A trait for querying whether a type can be said to "contain" a value. 
@@ -26,9 +25,7 @@ pub trait Contains { fn contains(t: &T) -> bool; } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(1, 30)] impl Contains for Tuple { fn contains(t: &T) -> bool { for_tuples!( #( @@ -44,9 +41,7 @@ pub trait ContainsPair { fn contains(a: &A, b: &B) -> bool; } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(0, 30)] impl ContainsPair for Tuple { fn contains(a: &A, b: &B) -> bool { for_tuples!( #( diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index 42f2d759a597d..d3dc57e1ee52d 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -18,7 +18,6 @@ //! Traits for managing information attached to pallets and their constituents. use codec::{Decode, Encode}; -use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; @@ -71,22 +70,40 @@ pub trait PalletsInfoAccess { /// /// You probably don't want this function but `infos()` instead. fn count() -> usize { - // for backwards compatibility with XCM-3, Mark as deprecated. - Self::infos().len() + 0 } + /// Extend the given vector by all of the pallets' information that this type represents. + /// + /// You probably don't want this function but `infos()` instead. + fn accumulate(_accumulator: &mut Vec) {} + /// All of the pallets' information that this type represents. 
- fn infos() -> Vec; + fn infos() -> Vec { + let mut result = Vec::with_capacity(Self::count()); + Self::accumulate(&mut result); + result + } } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] -impl PalletsInfoAccess for Tuple { - fn infos() -> Vec { - let mut res = vec![]; - for_tuples!( #( res.extend(Tuple::infos()); )* ); - res +impl PalletsInfoAccess for () {} +impl PalletsInfoAccess for (T,) { + fn count() -> usize { + T::count() + } + fn accumulate(acc: &mut Vec) { + T::accumulate(acc) + } +} + +impl PalletsInfoAccess for (T1, T2) { + fn count() -> usize { + T1::count() + T2::count() + } + fn accumulate(acc: &mut Vec) { + // The AllPallets type tuplises the pallets in reverse order, so we unreverse them here. + T2::accumulate(acc); + T1::accumulate(acc); } } diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index ce8faaaf37c3d..ccbb47909d5f4 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -18,11 +18,9 @@ //! Smaller traits used in FRAME which don't need their own file. 
use crate::dispatch::Parameter; -use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; -use impl_trait_for_tuples::impl_for_tuples; +use codec::{CompactLen, Decode, DecodeAll, Encode, EncodeLike, Input, MaxEncodedLen}; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; -use sp_core::bounded::bounded_vec::TruncateFrom; #[doc(hidden)] pub use sp_runtime::traits::{ ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, @@ -152,10 +150,10 @@ pub trait DefensiveOption { /// Defensively transform this option to a result, mapping `None` to the return value of an /// error closure. - fn defensive_ok_or_else E>(self, err: F) -> Result; + fn defensive_ok_or_else E>(self, err: F) -> Result; /// Defensively transform this option to a result, mapping `None` to a default value. - fn defensive_ok_or(self, err: E) -> Result; + fn defensive_ok_or(self, err: E) -> Result; /// Exactly the same as `map`, but it prints the appropriate warnings if the value being mapped /// is `None`. @@ -319,17 +317,16 @@ impl DefensiveOption for Option { ) } - fn defensive_ok_or_else E>(self, err: F) -> Result { + fn defensive_ok_or_else E>(self, err: F) -> Result { self.ok_or_else(|| { - let err_value = err(); - defensive!(err_value); - err_value + defensive!(); + err() }) } - fn defensive_ok_or(self, err: E) -> Result { + fn defensive_ok_or(self, err: E) -> Result { self.ok_or_else(|| { - defensive!(err); + defensive!(); err }) } @@ -370,171 +367,6 @@ impl DefensiveSaturating f } } -/// Construct an object by defensively truncating an input if the `TryFrom` conversion fails. -pub trait DefensiveTruncateFrom { - /// Use `TryFrom` first and defensively fall back to truncating otherwise. 
- /// - /// # Example - /// - /// ``` - /// use frame_support::{BoundedVec, traits::DefensiveTruncateFrom}; - /// use sp_runtime::traits::ConstU32; - /// - /// let unbound = vec![1, 2]; - /// let bound = BoundedVec::>::defensive_truncate_from(unbound); - /// - /// assert_eq!(bound, vec![1, 2]); - /// ``` - fn defensive_truncate_from(unbound: T) -> Self; -} - -impl DefensiveTruncateFrom for T -where - // NOTE: We use the fact that `BoundedVec` and - // `BoundedSlice` use `Self` as error type. We could also - // require a `Clone` bound and use `unbound.clone()` in the - // error case. - T: TruncateFrom + TryFrom, -{ - fn defensive_truncate_from(unbound: U) -> Self { - unbound.try_into().map_or_else( - |err| { - defensive!("DefensiveTruncateFrom truncating"); - T::truncate_from(err) - }, - |bound| bound, - ) - } -} - -/// Defensively calculates the minimum of two values. -/// -/// Can be used in contexts where we assume the receiver value to be (strictly) smaller. -pub trait DefensiveMin { - /// Returns the minimum and defensively checks that `self` is not larger than `other`. - /// - /// # Example - /// - /// ``` - /// use frame_support::traits::DefensiveMin; - /// // min(3, 4) is 3. - /// assert_eq!(3, 3_u32.defensive_min(4_u32)); - /// // min(4, 4) is 4. - /// assert_eq!(4, 4_u32.defensive_min(4_u32)); - /// ``` - /// - /// ```should_panic - /// use frame_support::traits::DefensiveMin; - /// // min(4, 3) panics. - /// 4_u32.defensive_min(3_u32); - /// ``` - fn defensive_min(self, other: T) -> Self; - - /// Returns the minimum and defensively checks that `self` is smaller than `other`. - /// - /// # Example - /// - /// ``` - /// use frame_support::traits::DefensiveMin; - /// // min(3, 4) is 3. - /// assert_eq!(3, 3_u32.defensive_strict_min(4_u32)); - /// ``` - /// - /// ```should_panic - /// use frame_support::traits::DefensiveMin; - /// // min(4, 4) panics. 
- /// 4_u32.defensive_strict_min(4_u32); - /// ``` - fn defensive_strict_min(self, other: T) -> Self; -} - -impl DefensiveMin for T -where - T: sp_std::cmp::PartialOrd, -{ - fn defensive_min(self, other: T) -> Self { - if self <= other { - self - } else { - defensive!("DefensiveMin"); - other - } - } - - fn defensive_strict_min(self, other: T) -> Self { - if self < other { - self - } else { - defensive!("DefensiveMin strict"); - other - } - } -} - -/// Defensively calculates the maximum of two values. -/// -/// Can be used in contexts where we assume the receiver value to be (strictly) larger. -pub trait DefensiveMax { - /// Returns the maximum and defensively asserts that `other` is not larger than `self`. - /// - /// # Example - /// - /// ``` - /// use frame_support::traits::DefensiveMax; - /// // max(4, 3) is 4. - /// assert_eq!(4, 4_u32.defensive_max(3_u32)); - /// // max(4, 4) is 4. - /// assert_eq!(4, 4_u32.defensive_max(4_u32)); - /// ``` - /// - /// ```should_panic - /// use frame_support::traits::DefensiveMax; - /// // max(4, 5) panics. - /// 4_u32.defensive_max(5_u32); - /// ``` - fn defensive_max(self, other: T) -> Self; - - /// Returns the maximum and defensively asserts that `other` is smaller than `self`. - /// - /// # Example - /// - /// ``` - /// use frame_support::traits::DefensiveMax; - /// // y(4, 3) is 4. - /// assert_eq!(4, 4_u32.defensive_strict_max(3_u32)); - /// ``` - /// - /// ```should_panic - /// use frame_support::traits::DefensiveMax; - /// // max(4, 4) panics. 
- /// 4_u32.defensive_strict_max(4_u32); - /// ``` - fn defensive_strict_max(self, other: T) -> Self; -} - -impl DefensiveMax for T -where - T: sp_std::cmp::PartialOrd, -{ - fn defensive_max(self, other: T) -> Self { - if self >= other { - self - } else { - defensive!("DefensiveMax"); - other - } - } - - fn defensive_strict_max(self, other: T) -> Self { - if self > other { - self - } else { - defensive!("DefensiveMax strict"); - other - } - } -} - /// Anything that can have a `::len()` method. pub trait Len { /// Return the length of data type. @@ -635,18 +467,14 @@ impl SameOrOther { } /// Handler for when a new account has been created. -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnNewAccount { /// A new account `who` has been registered. fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnKilledAccount { /// The account with the given id was reaped. fn on_killed_account(who: &AccountId); @@ -804,9 +632,7 @@ impl PrivilegeCmp for EqualPrivilegeOnly { /// but cannot preform any alterations. More specifically alterations are /// not forbidden, but they are not persisted in any way after the worker /// has finished. 
-#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OffchainWorker { /// This function is being called after every block import (when fully synced). /// @@ -877,13 +703,13 @@ pub trait EstimateCallFee { /// /// The dispatch info and the length is deduced from the call. The post info can optionally be /// provided. - fn estimate_call_fee(call: &Call, post_info: crate::dispatch::PostDispatchInfo) -> Balance; + fn estimate_call_fee(call: &Call, post_info: crate::weights::PostDispatchInfo) -> Balance; } // Useful for building mocks. #[cfg(feature = "std")] impl, const T: u32> EstimateCallFee for ConstU32 { - fn estimate_call_fee(_: &Call, _: crate::dispatch::PostDispatchInfo) -> Balance { + fn estimate_call_fee(_: &Call, _: crate::weights::PostDispatchInfo) -> Balance { T.into() } } @@ -919,10 +745,7 @@ impl Encode for WrapperOpaque { impl Decode for WrapperOpaque { fn decode(input: &mut I) -> Result { - Ok(Self(T::decode_all_with_depth_limit( - sp_api::MAX_EXTRINSIC_DEPTH, - &mut &>::decode(input)?[..], - )?)) + Ok(Self(T::decode_all(&mut &>::decode(input)?[..])?)) } fn skip(input: &mut I) -> Result<(), codec::Error> { @@ -940,7 +763,7 @@ impl MaxEncodedLen for WrapperOpaque { fn max_encoded_len() -> usize { let t_max_len = T::max_encoded_len(); - // See scale encoding: https://docs.substrate.io/reference/scale-codec/ + // See scale encoding https://docs.substrate.io/v3/advanced/scale-codec if t_max_len < 64 { t_max_len + 1 } else if t_max_len < 2usize.pow(14) { @@ -984,7 +807,7 @@ impl WrapperKeepOpaque { /// /// Returns `None` if the decoding failed. 
pub fn try_decode(&self) -> Option { - T::decode_all_with_depth_limit(sp_api::MAX_EXTRINSIC_DEPTH, &mut &self.data[..]).ok() + T::decode_all(&mut &self.data[..]).ok() } /// Returns the length of the encoded `T`. @@ -1098,7 +921,7 @@ pub trait PreimageRecipient: PreimageProvider { /// Maximum size of a preimage. type MaxSize: Get; - /// Store the bytes of a preimage on chain infallible due to the bounded type. + /// Store the bytes of a preimage on chain. fn note_preimage(bytes: crate::BoundedVec); /// Clear a previously noted preimage. This is infallible and should be treated more like a @@ -1116,81 +939,6 @@ impl PreimageRecipient for () { #[cfg(test)] mod test { use super::*; - use sp_core::bounded::{BoundedSlice, BoundedVec}; - use sp_std::marker::PhantomData; - - #[test] - #[cfg(not(debug_assertions))] - fn defensive_truncating_from_vec_defensive_works() { - let unbound = vec![1u32, 2]; - let bound = BoundedVec::>::defensive_truncate_from(unbound); - assert_eq!(bound, vec![1u32]); - } - - #[test] - #[cfg(not(debug_assertions))] - fn defensive_truncating_from_slice_defensive_works() { - let unbound = &[1u32, 2]; - let bound = BoundedSlice::>::defensive_truncate_from(unbound); - assert_eq!(bound, &[1u32][..]); - } - - #[test] - #[cfg(debug_assertions)] - #[should_panic( - expected = "Defensive failure has been triggered!: \"DefensiveTruncateFrom truncating\"" - )] - fn defensive_truncating_from_vec_defensive_panics() { - let unbound = vec![1u32, 2]; - let _ = BoundedVec::>::defensive_truncate_from(unbound); - } - - #[test] - #[cfg(debug_assertions)] - #[should_panic( - expected = "Defensive failure has been triggered!: \"DefensiveTruncateFrom truncating\"" - )] - fn defensive_truncating_from_slice_defensive_panics() { - let unbound = &[1u32, 2]; - let _ = BoundedSlice::>::defensive_truncate_from(unbound); - } - - #[test] - fn defensive_truncate_from_vec_works() { - let unbound = vec![1u32, 2, 3]; - let bound = 
BoundedVec::>::defensive_truncate_from(unbound.clone()); - assert_eq!(bound, unbound); - } - - #[test] - fn defensive_truncate_from_slice_works() { - let unbound = [1u32, 2, 3]; - let bound = BoundedSlice::>::defensive_truncate_from(&unbound); - assert_eq!(bound, &unbound[..]); - } - - #[derive(Encode, Decode)] - enum NestedType { - Nested(Box), - Done, - } - - #[test] - fn test_opaque_wrapper_decode_limit() { - let limit = sp_api::MAX_EXTRINSIC_DEPTH as usize; - let mut ok_bytes = vec![0u8; limit]; - ok_bytes.push(1u8); - let mut err_bytes = vec![0u8; limit + 1]; - err_bytes.push(1u8); - assert!(>::decode(&mut &ok_bytes.encode()[..]).is_ok()); - assert!(>::decode(&mut &err_bytes.encode()[..]).is_err()); - - let ok_keep_opaque = WrapperKeepOpaque { data: ok_bytes, _phantom: PhantomData }; - let err_keep_opaque = WrapperKeepOpaque { data: err_bytes, _phantom: PhantomData }; - - assert!(>::try_decode(&ok_keep_opaque).is_some()); - assert!(>::try_decode(&err_keep_opaque).is_none()); - } #[test] fn test_opaque_wrapper() { @@ -1237,52 +985,4 @@ mod test { let data = decoded.encode(); WrapperOpaque::::decode(&mut &data[..]).unwrap(); } - - #[test] - fn defensive_min_works() { - assert_eq!(10, 10_u32.defensive_min(11_u32)); - assert_eq!(10, 10_u32.defensive_min(10_u32)); - } - - #[test] - #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMin\"")] - fn defensive_min_panics() { - 10_u32.defensive_min(9_u32); - } - - #[test] - fn defensive_strict_min_works() { - assert_eq!(10, 10_u32.defensive_strict_min(11_u32)); - assert_eq!(9, 9_u32.defensive_strict_min(10_u32)); - } - - #[test] - #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMin strict\"")] - fn defensive_strict_min_panics() { - 9_u32.defensive_strict_min(9_u32); - } - - #[test] - fn defensive_max_works() { - assert_eq!(11, 11_u32.defensive_max(10_u32)); - assert_eq!(10, 10_u32.defensive_max(10_u32)); - } - - #[test] - #[should_panic(expected = "Defensive failure 
has been triggered!: \"DefensiveMax\"")] - fn defensive_max_panics() { - 9_u32.defensive_max(10_u32); - } - - #[test] - fn defensive_strict_max_works() { - assert_eq!(11, 11_u32.defensive_strict_max(10_u32)); - assert_eq!(10, 10_u32.defensive_strict_max(9_u32)); - } - - #[test] - #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMax strict\"")] - fn defensive_strict_max_panics() { - 9_u32.defensive_strict_max(9_u32); - } } diff --git a/frame/support/src/traits/preimages.rs b/frame/support/src/traits/preimages.rs deleted file mode 100644 index 594532ba96903..0000000000000 --- a/frame/support/src/traits/preimages.rs +++ /dev/null @@ -1,317 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Stuff for dealing with 32-byte hashed preimages. - -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use sp_core::{RuntimeDebug, H256}; -use sp_io::hashing::blake2_256; -use sp_runtime::{traits::ConstU32, DispatchError}; -use sp_std::borrow::Cow; - -pub type Hash = H256; -pub type BoundedInline = crate::BoundedVec>; - -#[derive( - Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, scale_info::TypeInfo, RuntimeDebug, -)] -#[codec(mel_bound())] -pub enum Bounded { - /// A Blake2 256 hash with no preimage length. We - /// do not support creation of this except for transitioning from legacy state. 
- /// In the future we will make this a pure `Dummy` item storing only the final `dummy` field. - Legacy { hash: Hash, dummy: sp_std::marker::PhantomData }, - /// A an bounded `Call`. Its encoding must be at most 128 bytes. - Inline(BoundedInline), - /// A Blake2-256 hash of the call together with an upper limit for its size. - Lookup { hash: Hash, len: u32 }, -} - -impl Bounded { - /// Casts the wrapped type into something that encodes alike. - /// - /// # Examples - /// ``` - /// use frame_support::traits::Bounded; - /// - /// // Transmute from `String` to `&str`. - /// let x: Bounded = Bounded::Inline(Default::default()); - /// let _: Bounded<&str> = x.transmute(); - /// ``` - pub fn transmute(self) -> Bounded - where - T: Encode + EncodeLike, - { - use Bounded::*; - match self { - Legacy { hash, .. } => Legacy { hash, dummy: sp_std::marker::PhantomData }, - Inline(x) => Inline(x), - Lookup { hash, len } => Lookup { hash, len }, - } - } - - /// Returns the hash of the preimage. - /// - /// The hash is re-calculated every time if the preimage is inlined. - pub fn hash(&self) -> H256 { - use Bounded::*; - match self { - Legacy { hash, .. } => *hash, - Inline(x) => blake2_256(x.as_ref()).into(), - Lookup { hash, .. } => *hash, - } - } -} - -// The maximum we expect a single legacy hash lookup to be. -const MAX_LEGACY_LEN: u32 = 1_000_000; - -impl Bounded { - /// Returns the length of the preimage or `None` if the length is unknown. - pub fn len(&self) -> Option { - match self { - Self::Legacy { .. } => None, - Self::Inline(i) => Some(i.len() as u32), - Self::Lookup { len, .. } => Some(*len), - } - } - - /// Returns whether the image will require a lookup to be peeked. - pub fn lookup_needed(&self) -> bool { - match self { - Self::Inline(..) => false, - Self::Legacy { .. } | Self::Lookup { .. } => true, - } - } - - /// The maximum length of the lookup that is needed to peek `Self`. - pub fn lookup_len(&self) -> Option { - match self { - Self::Inline(..) 
=> None, - Self::Legacy { .. } => Some(MAX_LEGACY_LEN), - Self::Lookup { len, .. } => Some(*len), - } - } - - /// Constructs a `Lookup` bounded item. - pub fn unrequested(hash: Hash, len: u32) -> Self { - Self::Lookup { hash, len } - } - - /// Constructs a `Legacy` bounded item. - #[deprecated = "This API is only for transitioning to Scheduler v3 API"] - pub fn from_legacy_hash(hash: impl Into) -> Self { - Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData } - } -} - -pub type FetchResult = Result, DispatchError>; - -/// A interface for looking up preimages from their hash on chain. -pub trait QueryPreimage { - /// Returns whether a preimage exists for a given hash and if so its length. - fn len(hash: &Hash) -> Option; - - /// Returns the preimage for a given hash. If given, `len` must be the size of the preimage. - fn fetch(hash: &Hash, len: Option) -> FetchResult; - - /// Returns whether a preimage request exists for a given hash. - fn is_requested(hash: &Hash) -> bool; - - /// Request that someone report a preimage. Providers use this to optimise the economics for - /// preimage reporting. - fn request(hash: &Hash); - - /// Cancel a previous preimage request. - fn unrequest(hash: &Hash); - - /// Request that the data required for decoding the given `bounded` value is made available. - fn hold(bounded: &Bounded) { - use Bounded::*; - match bounded { - Inline(..) => {}, - Legacy { hash, .. } | Lookup { hash, .. } => Self::request(hash), - } - } - - /// No longer request that the data required for decoding the given `bounded` value is made - /// available. - fn drop(bounded: &Bounded) { - use Bounded::*; - match bounded { - Inline(..) => {}, - Legacy { hash, .. } | Lookup { hash, .. } => Self::unrequest(hash), - } - } - - /// Check to see if all data required for the given `bounded` value is available for its - /// decoding. - fn have(bounded: &Bounded) -> bool { - use Bounded::*; - match bounded { - Inline(..) => true, - Legacy { hash, .. 
} | Lookup { hash, .. } => Self::len(hash).is_some(), - } - } - - /// Create a `Bounded` instance based on the `hash` and `len` of the encoded value. This may not - /// be `peek`-able or `realize`-able. - fn pick(hash: Hash, len: u32) -> Bounded { - Self::request(&hash); - Bounded::Lookup { hash, len } - } - - /// Convert the given `bounded` instance back into its original instance, also returning the - /// exact size of its encoded form if it needed to be looked-up from a stored preimage). - /// - /// NOTE: This does not remove any data needed for realization. If you will no longer use the - /// `bounded`, call `realize` instead or call `drop` afterwards. - fn peek(bounded: &Bounded) -> Result<(T, Option), DispatchError> { - use Bounded::*; - match bounded { - Inline(data) => T::decode(&mut &data[..]).ok().map(|x| (x, None)), - Lookup { hash, len } => { - let data = Self::fetch(hash, Some(*len))?; - T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32))) - }, - Legacy { hash, .. } => { - let data = Self::fetch(hash, None)?; - T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32))) - }, - } - .ok_or(DispatchError::Corruption) - } - - /// Convert the given `bounded` value back into its original instance. If successful, - /// `drop` any data backing it. This will not break the realisability of independently - /// created instances of `Bounded` which happen to have identical data. - fn realize(bounded: &Bounded) -> Result<(T, Option), DispatchError> { - let r = Self::peek(bounded)?; - Self::drop(bounded); - Ok(r) - } -} - -/// A interface for managing preimages to hashes on chain. -/// -/// Note that this API does not assume any underlying user is calling, and thus -/// does not handle any preimage ownership or fees. Other system level logic that -/// uses this API should implement that on their own side. -pub trait StorePreimage: QueryPreimage { - /// The maximum length of preimage we can store. 
- /// - /// This is the maximum length of the *encoded* value that can be passed to `bound`. - const MAX_LENGTH: usize; - - /// Request and attempt to store the bytes of a preimage on chain. - /// - /// May return `DispatchError::Exhausted` if the preimage is just too big. - fn note(bytes: Cow<[u8]>) -> Result; - - /// Attempt to clear a previously noted preimage. Exactly the same as `unrequest` but is - /// provided for symmetry. - fn unnote(hash: &Hash) { - Self::unrequest(hash) - } - - /// Convert an otherwise unbounded or large value into a type ready for placing in storage. The - /// result is a type whose `MaxEncodedLen` is 131 bytes. - /// - /// NOTE: Once this API is used, you should use either `drop` or `realize`. - fn bound(t: T) -> Result, DispatchError> { - let data = t.encode(); - let len = data.len() as u32; - Ok(match BoundedInline::try_from(data) { - Ok(bounded) => Bounded::Inline(bounded), - Err(unbounded) => Bounded::Lookup { hash: Self::note(unbounded.into())?, len }, - }) - } -} - -impl QueryPreimage for () { - fn len(_: &Hash) -> Option { - None - } - fn fetch(_: &Hash, _: Option) -> FetchResult { - Err(DispatchError::Unavailable) - } - fn is_requested(_: &Hash) -> bool { - false - } - fn request(_: &Hash) {} - fn unrequest(_: &Hash) {} -} - -impl StorePreimage for () { - const MAX_LENGTH: usize = 0; - fn note(_: Cow<[u8]>) -> Result { - Err(DispatchError::Exhausted) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{bounded_vec, BoundedVec}; - - #[test] - fn bounded_size_is_correct() { - assert_eq!(> as MaxEncodedLen>::max_encoded_len(), 131); - } - - #[test] - fn bounded_basic_works() { - let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; - let len = data.len() as u32; - let hash = blake2_256(&data).into(); - - // Inline works - { - let bound: Bounded> = Bounded::Inline(data.clone()); - assert_eq!(bound.hash(), hash); - assert_eq!(bound.len(), Some(len)); - assert!(!bound.lookup_needed()); - assert_eq!(bound.lookup_len(), 
None); - } - // Legacy works - { - let bound: Bounded> = Bounded::Legacy { hash, dummy: Default::default() }; - assert_eq!(bound.hash(), hash); - assert_eq!(bound.len(), None); - assert!(bound.lookup_needed()); - assert_eq!(bound.lookup_len(), Some(1_000_000)); - } - // Lookup works - { - let bound: Bounded> = Bounded::Lookup { hash, len: data.len() as u32 }; - assert_eq!(bound.hash(), hash); - assert_eq!(bound.len(), Some(len)); - assert!(bound.lookup_needed()); - assert_eq!(bound.lookup_len(), Some(len)); - } - } - - #[test] - fn bounded_transmuting_works() { - let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; - - // Transmute a `String` into a `&str`. - let x: Bounded = Bounded::Inline(data.clone()); - let y: Bounded<&str> = x.transmute(); - assert_eq!(y, Bounded::Inline(data)); - } -} diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs index b8e6a7f807904..39ebbb78321d6 100644 --- a/frame/support/src/traits/schedule.rs +++ b/frame/support/src/traits/schedule.rs @@ -17,8 +17,6 @@ //! Traits and associated utilities for scheduling dispatchables in FRAME. -#[allow(deprecated)] -use super::PreimageProvider; use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{traits::Saturating, DispatchError, RuntimeDebug}; @@ -130,12 +128,11 @@ impl MaybeHashed { } } -// TODO: deprecate pub mod v1 { use super::*; /// A type that can be used as a scheduler. - pub trait Anon { + pub trait Anon { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + Debug + TypeInfo + MaxEncodedLen; @@ -146,7 +143,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: RuntimeOrigin, + origin: Origin, call: Call, ) -> Result; @@ -180,7 +177,7 @@ pub mod v1 { } /// A type that can be used as a scheduler. - pub trait Named { + pub trait Named { /// An address which can be used for removing a scheduled task. 
type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug + MaxEncodedLen; @@ -192,7 +189,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: RuntimeOrigin, + origin: Origin, call: Call, ) -> Result; @@ -218,9 +215,9 @@ pub mod v1 { fn next_dispatch_time(id: Vec) -> Result; } - impl Anon for T + impl Anon for T where - T: v2::Anon, + T: v2::Anon, { type Address = T::Address; @@ -228,7 +225,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: RuntimeOrigin, + origin: Origin, call: Call, ) -> Result { let c = MaybeHashed::::Value(call); @@ -251,9 +248,9 @@ pub mod v1 { } } - impl Named for T + impl Named for T where - T: v2::Named, + T: v2::Named, { type Address = T::Address; @@ -262,7 +259,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: RuntimeOrigin, + origin: Origin, call: Call, ) -> Result { let c = MaybeHashed::::Value(call); @@ -286,12 +283,11 @@ pub mod v1 { } } -// TODO: deprecate pub mod v2 { use super::*; /// A type that can be used as a scheduler. - pub trait Anon { + pub trait Anon { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + Debug + TypeInfo + MaxEncodedLen; /// A means of expressing a call by the hash of its encoded data. @@ -304,7 +300,7 @@ pub mod v2 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: RuntimeOrigin, + origin: Origin, call: MaybeHashed, ) -> Result; @@ -338,7 +334,7 @@ pub mod v2 { } /// A type that can be used as a scheduler. - pub trait Named { + pub trait Named { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug + MaxEncodedLen; /// A means of expressing a call by the hash of its encoded data. 
@@ -352,7 +348,7 @@ pub mod v2 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: RuntimeOrigin, + origin: Origin, call: MaybeHashed, ) -> Result; @@ -379,97 +375,6 @@ pub mod v2 { } } -pub mod v3 { - use super::*; - use crate::traits::Bounded; - - /// A type that can be used as a scheduler. - pub trait Anon { - /// An address which can be used for removing a scheduled task. - type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + Debug + TypeInfo; - - /// Schedule a dispatch to happen at the beginning of some block in the future. - /// - /// This is not named. - fn schedule( - when: DispatchTime, - maybe_periodic: Option>, - priority: Priority, - origin: Origin, - call: Bounded, - ) -> Result; - - /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, - /// also. - /// - /// Will return an `Unavailable` error if the `address` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - /// - /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For - /// that, you must name the task explicitly using the `Named` trait. - fn cancel(address: Self::Address) -> Result<(), DispatchError>; - - /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed - /// only if it is executed *before* the currently scheduled block. For periodic tasks, - /// this dispatch is guaranteed to succeed only before the *initial* execution; for - /// others, use `reschedule_named`. - /// - /// Will return an `Unavailable` error if the `address` is invalid. - fn reschedule( - address: Self::Address, - when: DispatchTime, - ) -> Result; - - /// Return the next dispatch time for a given task. - /// - /// Will return an `Unavailable` error if the `address` is invalid. 
- fn next_dispatch_time(address: Self::Address) -> Result; - } - - pub type TaskName = [u8; 32]; - - /// A type that can be used as a scheduler. - pub trait Named { - /// An address which can be used for removing a scheduled task. - type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + sp_std::fmt::Debug; - - /// Schedule a dispatch to happen at the beginning of some block in the future. - /// - /// - `id`: The identity of the task. This must be unique and will return an error if not. - fn schedule_named( - id: TaskName, - when: DispatchTime, - maybe_periodic: Option>, - priority: Priority, - origin: Origin, - call: Bounded, - ) -> Result; - - /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances - /// of that, also. - /// - /// Will return an `Unavailable` error if the `id` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - fn cancel_named(id: TaskName) -> Result<(), DispatchError>; - - /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed - /// only if it is executed *before* the currently scheduled block. - /// - /// Will return an `Unavailable` error if the `id` is invalid. - fn reschedule_named( - id: TaskName, - when: DispatchTime, - ) -> Result; - - /// Return the next dispatch time for a given task. - /// - /// Will return an `Unavailable` error if the `id` is invalid. - fn next_dispatch_time(id: TaskName) -> Result; - } -} - pub use v1::*; + +use super::PreimageProvider; diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs index 24653d1899836..e484140cc2fd9 100644 --- a/frame/support/src/traits/storage.rs +++ b/frame/support/src/traits/storage.rs @@ -17,9 +17,6 @@ //! Traits for encoding data related to pallet's storage items. 
-use crate::sp_std::collections::btree_set::BTreeSet; -use impl_trait_for_tuples::impl_for_tuples; -pub use sp_core::storage::TrackedStorageKey; use sp_std::prelude::*; /// An instance of a pallet in the storage. @@ -74,9 +71,7 @@ pub trait StorageInfoTrait { fn storage_info() -> Vec; } -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +#[impl_trait_for_tuples::impl_for_tuples(30)] impl StorageInfoTrait for Tuple { fn storage_info() -> Vec { let mut res = vec![]; @@ -92,29 +87,3 @@ impl StorageInfoTrait for Tuple { pub trait PartialStorageInfoTrait { fn partial_storage_info() -> Vec; } - -/// Allows a pallet to specify storage keys to whitelist during benchmarking. -/// This means those keys will be excluded from the benchmarking performance -/// calculation. -pub trait WhitelistedStorageKeys { - /// Returns a [`Vec`] indicating the storage keys that - /// should be whitelisted during benchmarking. This means that those keys - /// will be excluded from the benchmarking performance calculation. 
- fn whitelisted_storage_keys() -> Vec; -} - -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] -impl WhitelistedStorageKeys for Tuple { - fn whitelisted_storage_keys() -> Vec { - // de-duplicate the storage keys - let mut combined_keys: BTreeSet = BTreeSet::new(); - for_tuples!( #( - for storage_key in Tuple::whitelisted_storage_keys() { - combined_keys.insert(storage_key); - } - )* ); - combined_keys.into_iter().collect::>() - } -} diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs index d0beb66d34923..9a1634fd89313 100644 --- a/frame/support/src/traits/tokens/currency.rs +++ b/frame/support/src/traits/tokens/currency.rs @@ -26,7 +26,7 @@ use crate::{ traits::Get, }; use codec::MaxEncodedLen; -use sp_runtime::{traits::MaybeSerializeDeserialize, FixedPointOperand}; +use sp_runtime::traits::MaybeSerializeDeserialize; use sp_std::fmt::Debug; mod reservable; @@ -37,7 +37,7 @@ pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency { /// The balance of an account. - type Balance: Balance + MaybeSerializeDeserialize + Debug + MaxEncodedLen + FixedPointOperand; + type Balance: Balance + MaybeSerializeDeserialize + Debug + MaxEncodedLen; /// The opaque token type for an imbalance. This is returned by unbalanced operations /// and must be dealt with. It may be dropped but cannot be cloned. 
diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs index 0e75ccc22d050..ed9c3a1afa480 100644 --- a/frame/support/src/traits/tokens/fungible/balanced.rs +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -164,7 +164,7 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, ) -> Result { let old_balance = Self::balance(who); - let (mut new_balance, mut amount) = if Self::reducible_balance(who, false) < amount { + let (mut new_balance, mut amount) = if old_balance < amount { return Err(TokenError::NoFunds.into()) } else { (old_balance - amount, amount) @@ -186,9 +186,8 @@ pub trait Unbalanced: Inspect { /// Return the imbalance by which the account was reduced. fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { let old_balance = Self::balance(who); - let old_free_balance = Self::reducible_balance(who, false); - let (mut new_balance, mut amount) = if old_free_balance < amount { - (old_balance.saturating_sub(old_free_balance), old_free_balance) + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) } else { (old_balance - amount, amount) }; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index d151a05e7ca30..b832e3dec9937 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -31,7 +31,6 @@ pub mod metadata; pub use balanced::{Balanced, Unbalanced}; mod imbalance; pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; -pub mod roles; /// Trait for providing balance-inspection access to a set of named fungible assets. 
pub trait Inspect { diff --git a/frame/support/src/traits/tokens/fungibles/approvals.rs b/frame/support/src/traits/tokens/fungibles/approvals.rs index 48929955d9497..7a08f11cf042a 100644 --- a/frame/support/src/traits/tokens/fungibles/approvals.rs +++ b/frame/support/src/traits/tokens/fungibles/approvals.rs @@ -24,7 +24,7 @@ pub trait Inspect: super::Inspect { } pub trait Mutate: Inspect { - // Approve a delegate account to spend an amount of tokens owned by an owner + // Aprove a delegate account to spend an amount of tokens owned by an owner fn approve( asset: Self::AssetId, owner: &AccountId, diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs index 9e50ff834a874..a75832e4c440f 100644 --- a/frame/support/src/traits/tokens/fungibles/balanced.rs +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -163,7 +163,7 @@ pub trait Balanced: Inspect { /// **WARNING** /// Do not use this directly unless you want trouble, since it allows you to alter account balances /// without keeping the issuance up to date. It has no safeguards against accidentally creating -/// token imbalances in your system leading to accidental inflation or deflation. It's really just +/// token imbalances in your system leading to accidental imflation or deflation. It's really just /// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to /// use. 
pub trait Unbalanced: Inspect { @@ -185,7 +185,7 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, ) -> Result { let old_balance = Self::balance(asset, who); - let (mut new_balance, mut amount) = if Self::reducible_balance(asset, who, false) < amount { + let (mut new_balance, mut amount) = if old_balance < amount { return Err(TokenError::NoFunds.into()) } else { (old_balance - amount, amount) @@ -211,9 +211,8 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, ) -> Self::Balance { let old_balance = Self::balance(asset, who); - let old_free_balance = Self::reducible_balance(asset, who, false); - let (mut new_balance, mut amount) = if old_free_balance < amount { - (old_balance.saturating_sub(old_free_balance), old_free_balance) + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) } else { (old_balance - amount, amount) }; diff --git a/frame/support/src/traits/tokens/fungibles/roles.rs b/frame/support/src/traits/tokens/fungibles/roles.rs deleted file mode 100644 index 18fd1cc801210..0000000000000 --- a/frame/support/src/traits/tokens/fungibles/roles.rs +++ /dev/null @@ -1,29 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Inspect traits for Asset roles - -pub trait Inspect: super::Inspect { - // Get owner for an AssetId. 
- fn owner(asset: Self::AssetId) -> Option; - // Get issuer for an AssetId. - fn issuer(asset: Self::AssetId) -> Option; - // Get admin for an AssetId. - fn admin(asset: Self::AssetId) -> Option; - // Get freezer for an AssetId. - fn freezer(asset: Self::AssetId) -> Option; -} diff --git a/frame/support/src/traits/tokens/nonfungible.rs b/frame/support/src/traits/tokens/nonfungible.rs index 46ca573131127..fe0d2e729930e 100644 --- a/frame/support/src/traits/tokens/nonfungible.rs +++ b/frame/support/src/traits/tokens/nonfungible.rs @@ -65,16 +65,11 @@ pub trait Inspect { /// Interface for enumerating items in existence or owned by a given account over a collection /// of NFTs. pub trait InspectEnumerable: Inspect { - /// The iterator type for [`Self::items`]. - type ItemsIterator: Iterator; - /// The iterator type for [`Self::owned`]. - type OwnedIterator: Iterator; - /// Returns an iterator of the items within a `collection` in existence. - fn items() -> Self::ItemsIterator; + fn items() -> Box>; /// Returns an iterator of the items of all collections owned by `who`. 
- fn owned(who: &AccountId) -> Self::OwnedIterator; + fn owned(who: &AccountId) -> Box>; } /// Trait for providing an interface for NFT-like items which may be minted, burned and/or have @@ -154,15 +149,10 @@ impl< AccountId, > InspectEnumerable for ItemOf { - type ItemsIterator = >::ItemsIterator; - type OwnedIterator = - >::OwnedInCollectionIterator; - - fn items() -> Self::ItemsIterator { + fn items() -> Box> { >::items(&A::get()) } - - fn owned(who: &AccountId) -> Self::OwnedIterator { + fn owned(who: &AccountId) -> Box> { >::owned_in_collection(&A::get(), who) } } diff --git a/frame/support/src/traits/tokens/nonfungibles.rs b/frame/support/src/traits/tokens/nonfungibles.rs index ac007b5a67f1d..d043a87ce7c10 100644 --- a/frame/support/src/traits/tokens/nonfungibles.rs +++ b/frame/support/src/traits/tokens/nonfungibles.rs @@ -105,29 +105,20 @@ pub trait Inspect { /// Interface for enumerating items in existence or owned by a given account over many collections /// of NFTs. pub trait InspectEnumerable: Inspect { - /// The iterator type for [`Self::collections`]. - type CollectionsIterator: Iterator; - /// The iterator type for [`Self::items`]. - type ItemsIterator: Iterator; - /// The iterator type for [`Self::owned`]. - type OwnedIterator: Iterator; - /// The iterator type for [`Self::owned_in_collection`]. - type OwnedInCollectionIterator: Iterator; - /// Returns an iterator of the collections in existence. - fn collections() -> Self::CollectionsIterator; + fn collections() -> Box>; /// Returns an iterator of the items of a `collection` in existence. - fn items(collection: &Self::CollectionId) -> Self::ItemsIterator; + fn items(collection: &Self::CollectionId) -> Box>; /// Returns an iterator of the items of all collections owned by `who`. - fn owned(who: &AccountId) -> Self::OwnedIterator; + fn owned(who: &AccountId) -> Box>; /// Returns an iterator of the items of `collection` owned by `who`. 
fn owned_in_collection( collection: &Self::CollectionId, who: &AccountId, - ) -> Self::OwnedInCollectionIterator; + ) -> Box>; } /// Trait for providing the ability to create collections of nonfungible items. diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs deleted file mode 100644 index 640bb566a65af..0000000000000 --- a/frame/support/src/traits/try_runtime.rs +++ /dev/null @@ -1,138 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Try-runtime specific traits and types. - -use impl_trait_for_tuples::impl_for_tuples; -use sp_arithmetic::traits::AtLeast32BitUnsigned; -use sp_std::prelude::*; - -// Which state tests to execute. -#[derive(codec::Encode, codec::Decode, Clone)] -pub enum Select { - /// None of them. - None, - /// All of them. - All, - /// Run a fixed number of them in a round robin manner. - RoundRobin(u32), - /// Run only pallets who's name matches the given list. - /// - /// Pallet names are obtained from [`super::PalletInfoAccess`]. 
- Only(Vec>), -} - -impl Default for Select { - fn default() -> Self { - Select::None - } -} - -impl sp_std::fmt::Debug for Select { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - match self { - Select::RoundRobin(x) => write!(f, "RoundRobin({})", x), - Select::Only(x) => write!( - f, - "Only({:?})", - x.iter() - .map(|x| sp_std::str::from_utf8(x).unwrap_or("")) - .collect::>(), - ), - Select::All => write!(f, "All"), - Select::None => write!(f, "None"), - } - } -} - -#[cfg(feature = "std")] -impl sp_std::str::FromStr for Select { - type Err = &'static str; - fn from_str(s: &str) -> Result { - match s { - "all" | "All" => Ok(Select::All), - "none" | "None" => Ok(Select::None), - _ => - if s.starts_with("rr-") { - let count = s - .split_once('-') - .and_then(|(_, count)| count.parse::().ok()) - .ok_or("failed to parse count")?; - Ok(Select::RoundRobin(count)) - } else { - let pallets = s.split(',').map(|x| x.as_bytes().to_vec()).collect::>(); - Ok(Select::Only(pallets)) - }, - } - } -} - -/// Execute some checks to ensure the internal state of a pallet is consistent. -/// -/// Usually, these checks should check all of the invariants that are expected to be held on all of -/// the storage items of your pallet. -pub trait TryState { - /// Execute the state checks. 
- fn try_state(_: BlockNumber, _: Select) -> Result<(), &'static str>; -} - -#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] -#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] -#[cfg_attr(all(feature = "tuples-128"), impl_for_tuples(128))] -impl TryState - for Tuple -{ - for_tuples!( where #( Tuple: crate::traits::PalletInfoAccess )* ); - fn try_state(n: BlockNumber, targets: Select) -> Result<(), &'static str> { - match targets { - Select::None => Ok(()), - Select::All => { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::try_state(n.clone(), targets.clone())); )* ); - result - }, - Select::RoundRobin(len) => { - let functions: &[fn(BlockNumber, Select) -> Result<(), &'static str>] = - &[for_tuples!(#( Tuple::try_state ),*)]; - let skip = n.clone() % (functions.len() as u32).into(); - let skip: u32 = - skip.try_into().unwrap_or_else(|_| sp_runtime::traits::Bounded::max_value()); - let mut result = Ok(()); - for try_state_fn in functions.iter().cycle().skip(skip as usize).take(len as usize) - { - result = result.and(try_state_fn(n.clone(), targets.clone())); - } - result - }, - Select::Only(ref pallet_names) => { - let try_state_fns: &[( - &'static str, - fn(BlockNumber, Select) -> Result<(), &'static str>, - )] = &[for_tuples!( - #( (::name(), Tuple::try_state) ),* - )]; - let mut result = Ok(()); - for (name, try_state_fn) in try_state_fns { - if pallet_names.iter().any(|n| n == name.as_bytes()) { - result = result.and(try_state_fn(n.clone(), targets.clone())); - } - } - result - }, - } - } -} diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs index 49ae3163d0cd1..b5e7d27073895 100644 --- a/frame/support/src/traits/voting.rs +++ b/frame/support/src/traits/voting.rs @@ -106,19 +106,6 @@ pub trait VoteTally { fn rejection(class: Class) -> Self; #[cfg(feature = "runtime-benchmarks")] fn from_requirements(support: Perbill, 
approval: Perbill, class: Class) -> Self; - #[cfg(feature = "runtime-benchmarks")] - /// A function that should be called before any use of the `runtime-benchmarks` gated functions - /// of the `VoteTally` trait. - /// - /// Should be used to set up any needed state in a Pallet which implements `VoteTally` so that - /// benchmarks that execute will complete successfully. `class` can be used to set up a - /// particular class of voters, and `granularity` is used to determine the weight of one vote - /// relative to total unanimity. - /// - /// For example, in the case where there are a number of unique voters, and each voter has equal - /// voting weight, a granularity of `Perbill::from_rational(1, 1000)` should create `1_000` - /// users. - fn setup(class: Class, granularity: Perbill); } pub enum PollStatus { None, diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 9ff49b97bf21f..c37a72536bddf 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -15,8 +15,102 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Re-exports `sp-weights` public API, and contains benchmarked weight constants specific to -//! FRAME. +//! # Primitives for transaction weighting. +//! +//! Every dispatchable function is responsible for providing `#[weight = $x]` attribute. In this +//! snipped, `$x` can be any user provided struct that implements the following traits: +//! +//! - [`WeighData`]: the weight amount. +//! - [`ClassifyDispatch`]: class of the dispatch. +//! - [`PaysFee`]: whether this weight should be translated to fee and deducted upon dispatch. +//! +//! Substrate then bundles the output information of the three traits into [`DispatchInfo`] struct +//! and provides it by implementing the [`GetDispatchInfo`] for all `Call` both inner and outer call +//! types. +//! +//! Substrate provides two pre-defined ways to annotate weight: +//! +//! ### 1. Fixed values +//! 
+//! This can only be used when all 3 traits can be resolved statically. You have 3 degrees of +//! configuration: +//! +//! 1. Define only weight, **in which case `ClassifyDispatch` will be `Normal` and `PaysFee` will be +//! `Yes`**. +//! +//! ``` +//! # use frame_system::Config; +//! frame_support::decl_module! { +//! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = 1000] +//! fn dispatching(origin) { unimplemented!() } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! 2.1 Define weight and class, **in which case `PaysFee` would be `Yes`**. +//! +//! ``` +//! # use frame_system::Config; +//! # use frame_support::weights::DispatchClass; +//! frame_support::decl_module! { +//! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = (1000, DispatchClass::Operational)] +//! fn dispatching(origin) { unimplemented!() } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! 2.2 Define weight and `PaysFee`, **in which case `ClassifyDispatch` would be `Normal`**. +//! +//! ``` +//! # use frame_system::Config; +//! # use frame_support::weights::Pays; +//! frame_support::decl_module! { +//! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = (1000, Pays::No)] +//! fn dispatching(origin) { unimplemented!() } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! 3. Define all 3 parameters. +//! +//! ``` +//! # use frame_system::Config; +//! # use frame_support::weights::{DispatchClass, Pays}; +//! frame_support::decl_module! { +//! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = (1000, DispatchClass::Operational, Pays::No)] +//! fn dispatching(origin) { unimplemented!() } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! ### 2. Define weights as a function of input arguments. +//! +//! The arguments of the dispatch are available in the weight expressions as a borrowed value. +//! +//! ``` +//! # use frame_system::Config; +//! # use frame_support::weights::{DispatchClass, Pays}; +//! 
frame_support::decl_module! { +//! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = ( +//! *a as u64 + *b, +//! DispatchClass::Operational, +//! if *a > 1000 { Pays::Yes } else { Pays::No } +//! )] +//! fn dispatching(origin, a: u32, b: u64) { unimplemented!() } +//! } +//! } +//! # fn main() {} +//! ``` +//! FRAME assumes a weight of `1_000_000_000_000` equals 1 second of compute on a standard machine. //! //! Latest machine specification used to benchmark are: //! - Digital Ocean: ubuntu-s-2vcpu-4gb-ams3-01 @@ -30,13 +124,40 @@ mod extrinsic_weights; mod paritydb_weights; mod rocksdb_weights; -use crate::dispatch; -pub use sp_weights::*; +use crate::{ + dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}, + traits::Get, +}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use smallvec::SmallVec; +use sp_arithmetic::{ + traits::{BaseArithmetic, Saturating, Unsigned}, + Perbill, +}; +use sp_runtime::{ + generic::{CheckedExtrinsic, UncheckedExtrinsic}, + traits::{SaturatedConversion, SignedExtension}, + RuntimeDebug, +}; + +/// Re-export priority as type +pub use sp_runtime::transaction_validity::TransactionPriority; + +/// Numeric range of a transaction weight. +pub type Weight = u64; /// These constants are specific to FRAME, and the current implementation of its various components. /// For example: FRAME System, FRAME Executive, our FRAME support libraries, etc... pub mod constants { - pub use sp_weights::constants::*; + use super::Weight; + + pub const WEIGHT_PER_SECOND: Weight = 1_000_000_000_000; + pub const WEIGHT_PER_MILLIS: Weight = WEIGHT_PER_SECOND / 1000; // 1_000_000_000 + pub const WEIGHT_PER_MICROS: Weight = WEIGHT_PER_MILLIS / 1000; // 1_000_000 + pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 // Expose the Block and Extrinsic base weights. 
pub use super::{block_weights::BlockExecutionWeight, extrinsic_weights::ExtrinsicBaseWeight}; @@ -47,69 +168,829 @@ pub mod constants { }; } -#[deprecated = "Function has moved to `frame_support::dispatch`"] -pub fn extract_actual_pays_fee( - res: &dispatch::DispatchResultWithPostInfo, - info: &dispatch::DispatchInfo, -) -> dispatch::Pays { - dispatch::extract_actual_pays_fee(res, info) -} -#[deprecated = "Function has moved to `frame_support::dispatch`"] -pub fn extract_actual_weight( - res: &dispatch::DispatchResultWithPostInfo, - info: &dispatch::DispatchInfo, -) -> Weight { - dispatch::extract_actual_weight(res, info) -} -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait ClassifyDispatch: dispatch::ClassifyDispatch { - fn classify_dispatch(&self, target: T) -> dispatch::DispatchClass { - >::classify_dispatch(self, target) - } -} -#[deprecated = "Enum has moved to `frame_support::dispatch`"] -pub type DispatchClass = dispatch::DispatchClass; -#[deprecated = "Struct has moved to `frame_support::dispatch`"] -pub type DispatchInfo = dispatch::DispatchInfo; -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait GetDispatchInfo: dispatch::GetDispatchInfo { - fn get_dispatch_info(&self) -> dispatch::DispatchInfo { - ::get_dispatch_info(self) - } -} -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait OneOrMany: dispatch::OneOrMany { - fn into_iter(self) -> Self::Iter - where - Self: Sized, - { - >::into_iter(self) - } -} -#[deprecated = "Enum has moved to `frame_support::dispatch`"] -pub type Pays = dispatch::Pays; -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait PaysFee: dispatch::PaysFee { - fn pays_fee(&self, target: T) -> dispatch::Pays { - >::pays_fee(self, target) - } -} -#[deprecated = "Struct has moved to `frame_support::dispatch`"] -pub type PerDispatchClass = dispatch::PerDispatchClass; -#[deprecated = "Struct has moved to `frame_support::dispatch`"] -pub type 
PostDispatchInfo = dispatch::PostDispatchInfo; -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait WeighData: dispatch::WeighData { - fn weigh_data(&self, target: T) -> Weight { - >::weigh_data(self, target) - } -} -#[deprecated = "Trait has moved to `frame_support::dispatch`"] -pub trait WithPostDispatchInfo: dispatch::WithPostDispatchInfo { - fn with_weight(self, actual_weight: Weight) -> dispatch::DispatchErrorWithPostInfo - where - Self: Sized, - { - ::with_weight(self, actual_weight) +/// Means of weighing some particular kind of data (`T`). +pub trait WeighData { + /// Weigh the data `T` given by `target`. When implementing this for a dispatchable, `T` will be + /// a tuple of all arguments given to the function (except origin). + fn weigh_data(&self, target: T) -> Weight; +} + +/// Means of classifying a dispatchable function. +pub trait ClassifyDispatch { + /// Classify the dispatch function based on input data `target` of type `T`. When implementing + /// this for a dispatchable, `T` will be a tuple of all arguments given to the function (except + /// origin). + fn classify_dispatch(&self, target: T) -> DispatchClass; +} + +/// Indicates if dispatch function should pay fees or not. +/// If set to `Pays::No`, the block resource limits are applied, yet no fee is deducted. +pub trait PaysFee { + fn pays_fee(&self, _target: T) -> Pays; +} + +/// Explicit enum to denote if a transaction pays fee or not. +#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, TypeInfo)] +pub enum Pays { + /// Transactor will pay related fees. + Yes, + /// Transactor will NOT pay related fees. + No, +} + +impl Default for Pays { + fn default() -> Self { + Self::Yes + } +} + +/// A generalized group of dispatch types. +/// +/// NOTE whenever upgrading the enum make sure to also update +/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. 
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum DispatchClass { + /// A normal dispatch. + Normal, + /// An operational dispatch. + Operational, + /// A mandatory dispatch. These kinds of dispatch are always included regardless of their + /// weight, therefore it is critical that they are separately validated to ensure that a + /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just + /// means ensuring that the extrinsic can only be included once and that it is always very + /// light. + /// + /// Do *NOT* use it for extrinsics that can be heavy. + /// + /// The only real use case for this is inherent extrinsics that are required to execute in a + /// block for the block to be valid, and it solves the issue in the case that the block + /// initialization is sufficiently heavy to mean that those inherents do not fit into the + /// block. Essentially, we assume that in these exceptional circumstances, it is better to + /// allow an overweight block to be created than to not allow any block at all to be created. + Mandatory, +} + +impl Default for DispatchClass { + fn default() -> Self { + Self::Normal + } +} + +impl DispatchClass { + /// Returns an array containing all dispatch classes. + pub fn all() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] + } + + /// Returns an array of all dispatch classes except `Mandatory`. + pub fn non_mandatory() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational] + } +} + +/// A trait that represents one or many values of given type. +/// +/// Useful to accept as parameter type to let the caller pass either a single value directly +/// or an iterator. +pub trait OneOrMany { + /// The iterator type. 
+ type Iter: Iterator; + /// Convert this item into an iterator. + fn into_iter(self) -> Self::Iter; +} + +impl OneOrMany for DispatchClass { + type Iter = sp_std::iter::Once; + fn into_iter(self) -> Self::Iter { + sp_std::iter::once(self) + } +} + +impl<'a> OneOrMany for &'a [DispatchClass] { + type Iter = sp_std::iter::Cloned>; + fn into_iter(self) -> Self::Iter { + self.iter().cloned() + } +} + +/// A bundle of static information collected from the `#[weight = $x]` attributes. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct DispatchInfo { + /// Weight of this transaction. + pub weight: Weight, + /// Class of this transaction. + pub class: DispatchClass, + /// Does this transaction pay fees. + pub pays_fee: Pays, +} + +/// A `Dispatchable` function (aka transaction) that can carry some static information along with +/// it, using the `#[weight]` attribute. +pub trait GetDispatchInfo { + /// Return a `DispatchInfo`, containing relevant information of this dispatch. + /// + /// This is done independently of its encoded size. + fn get_dispatch_info(&self) -> DispatchInfo; +} + +impl GetDispatchInfo for () { + fn get_dispatch_info(&self) -> DispatchInfo { + DispatchInfo::default() + } +} + +/// Weight information that is only available post dispatch. +/// NOTE: This can only be used to reduce the weight or fee, not increase it. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct PostDispatchInfo { + /// Actual weight consumed by a call or `None` which stands for the worst case static weight. + pub actual_weight: Option, + /// Whether this transaction should pay fees when all is said and done. + pub pays_fee: Pays, +} + +impl PostDispatchInfo { + /// Calculate how much (if any) weight was not used by the `Dispatchable`. 
+ pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { + info.weight - self.calc_actual_weight(info) + } + + /// Calculate how much weight was actually spent by the `Dispatchable`. + pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight { + if let Some(actual_weight) = self.actual_weight { + actual_weight.min(info.weight) + } else { + info.weight + } + } + + /// Determine if user should actually pay fees at the end of the dispatch. + pub fn pays_fee(&self, info: &DispatchInfo) -> Pays { + // If they originally were not paying fees, or the post dispatch info + // says they should not pay fees, then they don't pay fees. + // This is because the pre dispatch information must contain the + // worst case for weight and fees paid. + if info.pays_fee == Pays::No || self.pays_fee == Pays::No { + Pays::No + } else { + // Otherwise they pay. + Pays::Yes + } + } +} + +/// Extract the actual weight from a dispatch result if any or fall back to the default weight. +pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Weight { + match result { + Ok(post_info) => post_info, + Err(err) => &err.post_info, + } + .calc_actual_weight(info) +} + +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. 
+pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { + match result { + Ok(post_info) => post_info, + Err(err) => &err.post_info, + } + .pays_fee(info) +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (actual_weight, pays_fee) = post_weight_info; + Self { actual_weight, pays_fee } + } +} + +impl From for PostDispatchInfo { + fn from(pays_fee: Pays) -> Self { + Self { actual_weight: None, pays_fee } + } +} + +impl From> for PostDispatchInfo { + fn from(actual_weight: Option) -> Self { + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl From<()> for PostDispatchInfo { + fn from(_: ()) -> Self { + Self { actual_weight: None, pays_fee: Default::default() } + } +} + +impl sp_runtime::traits::Printable for PostDispatchInfo { + fn print(&self) { + "actual_weight=".print(); + match self.actual_weight { + Some(weight) => weight.print(), + None => "max-weight".print(), + }; + "pays_fee=".print(); + match self.pays_fee { + Pays::Yes => "Yes".print(), + Pays::No => "No".print(), + } + } +} + +/// Allows easy conversion from `DispatchError` to `DispatchErrorWithPostInfo` for dispatchables +/// that want to return a custom a posterior weight on error. +pub trait WithPostDispatchInfo { + /// Call this on your modules custom errors type in order to return a custom weight on error. 
+ /// + /// # Example + /// + /// ```ignore + /// let who = ensure_signed(origin).map_err(|e| e.with_weight(100))?; + /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); + /// ``` + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; +} + +impl WithPostDispatchInfo for T +where + T: Into, +{ + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(actual_weight), + pays_fee: Default::default(), + }, + error: self.into(), + } + } +} + +impl WeighData for Weight { + fn weigh_data(&self, _: T) -> Weight { + *self + } +} + +impl ClassifyDispatch for Weight { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for Weight { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (Weight, DispatchClass, Pays) { + fn weigh_data(&self, _: T) -> Weight { + self.0 + } +} + +impl ClassifyDispatch for (Weight, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (Weight, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (Weight, DispatchClass) { + fn weigh_data(&self, _: T) -> Weight { + self.0 + } +} + +impl ClassifyDispatch for (Weight, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (Weight, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (Weight, Pays) { + fn weigh_data(&self, _: T) -> Weight { + self.0 + } +} + +impl ClassifyDispatch for (Weight, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (Weight, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +/// Implementation for unchecked extrinsic. 
+impl GetDispatchInfo + for UncheckedExtrinsic +where + Call: GetDispatchInfo, + Extra: SignedExtension, +{ + fn get_dispatch_info(&self) -> DispatchInfo { + self.function.get_dispatch_info() + } +} + +/// Implementation for checked extrinsic. +impl GetDispatchInfo for CheckedExtrinsic +where + Call: GetDispatchInfo, +{ + fn get_dispatch_info(&self) -> DispatchInfo { + self.function.get_dispatch_info() + } +} + +/// Implementation for test extrinsic. +#[cfg(feature = "std")] +impl GetDispatchInfo for sp_runtime::testing::TestXt { + fn get_dispatch_info(&self) -> DispatchInfo { + // for testing: weight == size. + DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::Yes, ..Default::default() } + } +} + +/// The weight of database operations that the runtime can invoke. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct RuntimeDbWeight { + pub read: Weight, + pub write: Weight, +} + +impl RuntimeDbWeight { + pub fn reads(self, r: Weight) -> Weight { + self.read.saturating_mul(r) + } + + pub fn writes(self, w: Weight) -> Weight { + self.write.saturating_mul(w) + } + + pub fn reads_writes(self, r: Weight, w: Weight) -> Weight { + let read_weight = self.read.saturating_mul(r); + let write_weight = self.write.saturating_mul(w); + read_weight.saturating_add(write_weight) + } +} + +/// One coefficient and its position in the `WeightToFee`. +/// +/// One term of polynomial is calculated as: +/// +/// ```ignore +/// coeff_integer * x^(degree) + coeff_frac * x^(degree) +/// ``` +/// +/// The `negative` value encodes whether the term is added or substracted from the +/// overall polynomial result. +#[derive(Clone, Encode, Decode, TypeInfo)] +pub struct WeightToFeeCoefficient { + /// The integral part of the coefficient. + pub coeff_integer: Balance, + /// The fractional part of the coefficient. + pub coeff_frac: Perbill, + /// True iff the coefficient should be interpreted as negative. 
+ pub negative: bool, + /// Degree/exponent of the term. + pub degree: u8, +} + +/// A list of coefficients that represent one polynomial. +pub type WeightToFeeCoefficients = SmallVec<[WeightToFeeCoefficient; 4]>; + +/// A trait that describes the weight to fee calculation. +pub trait WeightToFee { + /// The type that is returned as result from calculation. + type Balance: BaseArithmetic + From + Copy + Unsigned; + + /// Calculates the fee from the passed `weight`. + fn weight_to_fee(weight: &Weight) -> Self::Balance; +} + +/// A trait that describes the weight to fee calculation as polynomial. +/// +/// An implementor should only implement the `polynomial` function. +pub trait WeightToFeePolynomial { + /// The type that is returned as result from polynomial evaluation. + type Balance: BaseArithmetic + From + Copy + Unsigned; + + /// Returns a polynomial that describes the weight to fee conversion. + /// + /// This is the only function that should be manually implemented. Please note + /// that all calculation is done in the probably unsigned `Balance` type. This means + /// that the order of coefficients is important as putting the negative coefficients + /// first will most likely saturate the result to zero mid evaluation. + fn polynomial() -> WeightToFeeCoefficients; +} + +impl WeightToFee for T +where + T: WeightToFeePolynomial, +{ + type Balance = ::Balance; + + /// Calculates the fee from the passed `weight` according to the `polynomial`. + /// + /// This should not be overridden in most circumstances. Calculation is done in the + /// `Balance` type and never overflows. All evaluation is saturating. + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::polynomial() + .iter() + .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { + let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); + + // The sum could get negative. Therefore we only sum with the accumulator. 
+ // The Perbill Mul implementation is non overflowing. + let frac = args.coeff_frac * w; + let integer = args.coeff_integer.saturating_mul(w); + + if args.negative { + acc = acc.saturating_sub(frac); + acc = acc.saturating_sub(integer); + } else { + acc = acc.saturating_add(frac); + acc = acc.saturating_add(integer); + } + + acc + }) + } +} + +/// Implementor of `WeightToFee` that maps one unit of weight to one unit of fee. +pub struct IdentityFee(sp_std::marker::PhantomData); + +impl WeightToFee for IdentityFee +where + T: BaseArithmetic + From + Copy + Unsigned, +{ + type Balance = T; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::Balance::saturated_from(*weight) + } +} + +/// Implementor of [`WeightToFee`] that uses a constant multiplier. +/// # Example +/// +/// ``` +/// # use frame_support::traits::ConstU128; +/// # use frame_support::weights::ConstantMultiplier; +/// // Results in a multiplier of 10 for each unit of weight (or length) +/// type LengthToFee = ConstantMultiplier::>; +/// ``` +pub struct ConstantMultiplier(sp_std::marker::PhantomData<(T, M)>); + +impl WeightToFee for ConstantMultiplier +where + T: BaseArithmetic + From + Copy + Unsigned, + M: Get, +{ + type Balance = T; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::Balance::saturated_from(*weight).saturating_mul(M::get()) + } +} + +/// A struct holding value for each `DispatchClass`. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct PerDispatchClass { + /// Value for `Normal` extrinsics. + normal: T, + /// Value for `Operational` extrinsics. + operational: T, + /// Value for `Mandatory` extrinsics. + mandatory: T, +} + +impl PerDispatchClass { + /// Create new `PerDispatchClass` with the same value for every class. 
+ pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { + Self { + normal: val(DispatchClass::Normal), + operational: val(DispatchClass::Operational), + mandatory: val(DispatchClass::Mandatory), + } + } + + /// Get a mutable reference to current value of given class. + pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { + match class { + DispatchClass::Operational => &mut self.operational, + DispatchClass::Normal => &mut self.normal, + DispatchClass::Mandatory => &mut self.mandatory, + } + } + + /// Get current value for given class. + pub fn get(&self, class: DispatchClass) -> &T { + match class { + DispatchClass::Normal => &self.normal, + DispatchClass::Operational => &self.operational, + DispatchClass::Mandatory => &self.mandatory, + } + } +} + +impl PerDispatchClass { + /// Set the value of given class. + pub fn set(&mut self, new: T, class: impl OneOrMany) { + for class in class.into_iter() { + *self.get_mut(class) = new.clone(); + } + } +} + +impl PerDispatchClass { + /// Returns the total weight consumed by all extrinsics in the block. + pub fn total(&self) -> Weight { + let mut sum = 0; + for class in DispatchClass::all() { + sum = sum.saturating_add(*self.get(*class)); + } + sum + } + + /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. + pub fn add(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_add(weight); + } + + /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would + /// occur. + pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + let value = self.get_mut(class); + *value = value.checked_add(weight).ok_or(())?; + Ok(()) + } + + /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of + /// `Weight`. 
+ pub fn sub(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_sub(weight); + } +} + +#[cfg(test)] +#[allow(dead_code)] +mod tests { + use super::*; + use crate::{decl_module, parameter_types, traits::Get}; + use smallvec::smallvec; + + pub trait Config: 'static { + type Origin; + type Balance; + type BlockNumber; + type DbWeight: Get; + type PalletInfo: crate::traits::PalletInfo; + } + + pub struct TraitImpl {} + + parameter_types! { + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + } + + impl Config for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + type Balance = u32; + type DbWeight = DbWeight; + type PalletInfo = crate::tests::PanicPalletInfo; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin, system=self { + // no arguments, fixed weight + #[weight = 1000] + fn f00(_origin) { unimplemented!(); } + + #[weight = (1000, DispatchClass::Mandatory)] + fn f01(_origin) { unimplemented!(); } + + #[weight = (1000, Pays::No)] + fn f02(_origin) { unimplemented!(); } + + #[weight = (1000, DispatchClass::Operational, Pays::No)] + fn f03(_origin) { unimplemented!(); } + + // weight = a x 10 + b + #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] + fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = (0, DispatchClass::Operational, Pays::Yes)] + fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + fn f20(_origin) { unimplemented!(); } + + #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + fn f21(_origin) { unimplemented!(); } + + } + } + + #[test] + fn weights_are_correct() { + // #[weight = 1000] + let info = Call::::f00 {}.get_dispatch_info(); + assert_eq!(info.weight, 1000); + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight 
= (1000, DispatchClass::Mandatory)] + let info = Call::::f01 {}.get_dispatch_info(); + assert_eq!(info.weight, 1000); + assert_eq!(info.class, DispatchClass::Mandatory); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (1000, Pays::No)] + let info = Call::::f02 {}.get_dispatch_info(); + assert_eq!(info.weight, 1000); + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::No); + + // #[weight = (1000, DispatchClass::Operational, Pays::No)] + let info = Call::::f03 {}.get_dispatch_info(); + assert_eq!(info.weight, 1000); + assert_eq!(info.class, DispatchClass::Operational); + assert_eq!(info.pays_fee, Pays::No); + + // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] + let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, 150); // 13*10 + 20 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (0, DispatchClass::Operational, Pays::Yes)] + let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, 0); + assert_eq!(info.class, DispatchClass::Operational); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + let info = Call::::f20 {}.get_dispatch_info(); + assert_eq!(info.weight, 12300); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + let info = Call::::f21 {}.get_dispatch_info(); + assert_eq!(info.weight, 45600); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + } + + #[test] + fn extract_actual_weight_works() { + let pre = DispatchInfo { weight: 1000, ..Default::default() }; + assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), 7); + assert_eq!(extract_actual_weight(&Ok(Some(1000).into()), &pre), 
1000); + assert_eq!(extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), 9); + } + + #[test] + fn extract_actual_weight_caps_at_pre_weight() { + let pre = DispatchInfo { weight: 1000, ..Default::default() }; + assert_eq!(extract_actual_weight(&Ok(Some(1250).into()), &pre), 1000); + assert_eq!( + extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(1300)), &pre), + 1000 + ); + } + + #[test] + fn extract_actual_pays_fee_works() { + let pre = DispatchInfo { weight: 1000, ..Default::default() }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); + assert_eq!( + extract_actual_pays_fee(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), + Pays::Yes + ); + assert_eq!( + extract_actual_pays_fee( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }, + error: DispatchError::BadOrigin, + }), + &pre + ), + Pays::No + ); + + let pre = DispatchInfo { weight: 1000, pays_fee: Pays::No, ..Default::default() }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); + } + + type Balance = u64; + + // 0.5x^3 + 2.333x^2 + 7x - 10_000 + struct Poly; + impl WeightToFeePolynomial for Poly { + type Balance = Balance; + + fn polynomial() -> WeightToFeeCoefficients { + smallvec![ + WeightToFeeCoefficient { + coeff_integer: 0, + coeff_frac: Perbill::from_float(0.5), + negative: false, + degree: 3 + }, + WeightToFeeCoefficient { + coeff_integer: 2, + coeff_frac: Perbill::from_rational(1u32, 3u32), + negative: false, + degree: 2 + 
}, + WeightToFeeCoefficient { + coeff_integer: 7, + coeff_frac: Perbill::zero(), + negative: false, + degree: 1 + }, + WeightToFeeCoefficient { + coeff_integer: 10_000, + coeff_frac: Perbill::zero(), + negative: true, + degree: 0 + }, + ] + } + } + + #[test] + fn polynomial_works() { + // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 + assert_eq!(Poly::weight_to_fee(&100), 514033); + // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 + assert_eq!(Poly::weight_to_fee(&10_123), 518917034928); + } + + #[test] + fn polynomial_does_not_underflow() { + assert_eq!(Poly::weight_to_fee(&0), 0); + assert_eq!(Poly::weight_to_fee(&10), 0); + } + + #[test] + fn polynomial_does_not_overflow() { + assert_eq!(Poly::weight_to_fee(&Weight::max_value()), Balance::max_value() - 10_000); + } + + #[test] + fn identity_fee_works() { + assert_eq!(IdentityFee::::weight_to_fee(&0), 0); + assert_eq!(IdentityFee::::weight_to_fee(&50), 50); + assert_eq!( + IdentityFee::::weight_to_fee(&Weight::max_value()), + Balance::max_value() + ); + } + + #[test] + fn constant_fee_works() { + use crate::traits::ConstU128; + assert_eq!(ConstantMultiplier::>::weight_to_fee(&0), 0); + assert_eq!(ConstantMultiplier::>::weight_to_fee(&50), 500); + assert_eq!(ConstantMultiplier::>::weight_to_fee(&16), 16384); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&2), + u128::MAX + ); } } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 5c8e1f1c86e9d..b86334514af2f 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +16,12 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07 (Y/M/D) -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24 (Y/M/D) //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` //! WEIGHT-PATH: `./frame/support/src/weights/` -//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` +//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1`, WEIGHT-ADD: `0` // Executed Command: // ./target/production/substrate @@ -32,33 +31,34 @@ // --execution=wasm // --wasm-execution=compiled // --weight-path=./frame/support/src/weights/ -// --header=./HEADER-APACHE2 // --warmup=10 // --repeat=100 -use sp_core::parameter_types; -use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; +use frame_support::{ + parameter_types, + weights::{constants::WEIGHT_PER_NANOS, Weight}, +}; parameter_types! { /// Time to execute an empty block. - /// Calculated by multiplying the *Average* with `1.0` and adding `0`. + /// Calculated by multiplying the *Average* with `1` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 351_000, 392_617 - /// Average: 358_523 - /// Median: 359_836 - /// Std-Dev: 6698.67 + /// Min, Max: 5_303_128, 5_507_784 + /// Average: 5_346_284 + /// Median: 5_328_139 + /// Std-Dev: 41749.5 /// /// Percentiles nanoseconds: - /// 99th: 390_723 - /// 95th: 365_799 - /// 75th: 361_582 - pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(358_523); + /// 99th: 5_489_273 + /// 95th: 5_433_314 + /// 75th: 5_354_812 + pub const BlockExecutionWeight: Weight = 5_346_284 * WEIGHT_PER_NANOS; } #[cfg(test)] mod test_weights { - use sp_weights::constants; + use frame_support::weights::constants; /// Checks that the weight exists and is sane. // NOTE: If this test fails but you are sure that the generated values are fine, @@ -68,14 +68,8 @@ mod test_weights { let w = super::BlockExecutionWeight::get(); // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), - "Weight should be at least 100 µs." - ); + assert!(w >= 100 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs."); // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), - "Weight should be at most 50 ms." - ); + assert!(w <= 50 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms."); } } diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 1db2281dfe488..b8a52c164d8fe 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +16,12 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07 (Y/M/D) -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24 (Y/M/D) //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` //! WEIGHT-PATH: `./frame/support/src/weights/` -//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` +//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1`, WEIGHT-ADD: `0` // Executed Command: // ./target/production/substrate @@ -32,33 +31,34 @@ // --execution=wasm // --wasm-execution=compiled // --weight-path=./frame/support/src/weights/ -// --header=./HEADER-APACHE2 // --warmup=10 // --repeat=100 -use sp_core::parameter_types; -use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; +use frame_support::{ + parameter_types, + weights::{constants::WEIGHT_PER_NANOS, Weight}, +}; parameter_types! { /// Time to execute a NO-OP extrinsic, for example `System::remark`. - /// Calculated by multiplying the *Average* with `1.0` and adding `0`. + /// Calculated by multiplying the *Average* with `1` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 98_722, 101_420 - /// Average: 98_974 - /// Median: 98_951 - /// Std-Dev: 271.62 + /// Min, Max: 86_060, 86_999 + /// Average: 86_298 + /// Median: 86_248 + /// Std-Dev: 207.19 /// /// Percentiles nanoseconds: - /// 99th: 99_202 - /// 95th: 99_163 - /// 75th: 99_030 - pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(98_974); + /// 99th: 86_924 + /// 95th: 86_828 + /// 75th: 86_347 + pub const ExtrinsicBaseWeight: Weight = 86_298 * WEIGHT_PER_NANOS; } #[cfg(test)] mod test_weights { - use sp_weights::constants; + use frame_support::weights::constants; /// Checks that the weight exists and is sane. // NOTE: If this test fails but you are sure that the generated values are fine, @@ -68,14 +68,8 @@ mod test_weights { let w = super::ExtrinsicBaseWeight::get(); // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), - "Weight should be at least 10 µs." - ); + assert!(w >= 10 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs."); // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), - "Weight should be at most 1 ms." - ); + assert!(w <= constants::WEIGHT_PER_MILLIS, "Weight should be at most 1 ms."); } } diff --git a/frame/support/src/weights/paritydb_weights.rs b/frame/support/src/weights/paritydb_weights.rs index 344e6cf0ddb6e..572187ba78a92 100644 --- a/frame/support/src/weights/paritydb_weights.rs +++ b/frame/support/src/weights/paritydb_weights.rs @@ -16,23 +16,24 @@ // limitations under the License. pub mod constants { - use frame_support::weights::constants; - use sp_core::parameter_types; - use sp_weights::RuntimeDbWeight; + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; parameter_types! { /// ParityDB can be enabled with a feature flag, but is still experimental. These weights /// are available for brave runtime engineers who may want to try this out as default. 
pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_PER_NANOS.ref_time(), - write: 50_000 * constants::WEIGHT_PER_NANOS.ref_time(), + read: 8_000 * constants::WEIGHT_PER_NANOS, + write: 50_000 * constants::WEIGHT_PER_NANOS, }; } #[cfg(test)] mod test_db_weights { use super::constants::ParityDbWeight as W; - use sp_weights::constants; + use frame_support::weights::constants; /// Checks that all weights exist and have sane values. // NOTE: If this test fails but you are sure that the generated values are fine, @@ -41,20 +42,20 @@ pub mod constants { fn sane() { // At least 1 µs. assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().reads(1) >= constants::WEIGHT_PER_MICROS, "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().writes(1) >= constants::WEIGHT_PER_MICROS, "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().reads(1) <= constants::WEIGHT_PER_MILLIS, "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().writes(1) <= constants::WEIGHT_PER_MILLIS, "Write weight should be at most 1 ms." ); } diff --git a/frame/support/src/weights/rocksdb_weights.rs b/frame/support/src/weights/rocksdb_weights.rs index 4dec2d8c877ea..f37964dcbd825 100644 --- a/frame/support/src/weights/rocksdb_weights.rs +++ b/frame/support/src/weights/rocksdb_weights.rs @@ -16,23 +16,24 @@ // limitations under the License. pub mod constants { - use frame_support::weights::constants; - use sp_core::parameter_types; - use sp_weights::RuntimeDbWeight; + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; parameter_types! 
{ /// By default, Substrate uses RocksDB, so this will be the weight used throughout /// the runtime. pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_PER_NANOS.ref_time(), - write: 100_000 * constants::WEIGHT_PER_NANOS.ref_time(), + read: 25_000 * constants::WEIGHT_PER_NANOS, + write: 100_000 * constants::WEIGHT_PER_NANOS, }; } #[cfg(test)] mod test_db_weights { use super::constants::RocksDbWeight as W; - use sp_weights::constants; + use frame_support::weights::constants; /// Checks that all weights exist and have sane values. // NOTE: If this test fails but you are sure that the generated values are fine, @@ -41,20 +42,20 @@ pub mod constants { fn sane() { // At least 1 µs. assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().reads(1) >= constants::WEIGHT_PER_MICROS, "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().writes(1) >= constants::WEIGHT_PER_MICROS, "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().reads(1) <= constants::WEIGHT_PER_MILLIS, "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().writes(1) <= constants::WEIGHT_PER_MILLIS, "Write weight should be at most 1 ms." ); } diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 471dba8df44e2..dd23d7e6b0d96 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -47,11 +47,9 @@ std = [ "sp-version/std", ] try-runtime = ["frame-support/try-runtime"] -# WARNING: -# Only CI runs with this feature enabled. This feature is for testing stuff related to the FRAME macros -# in conjunction with rust features. 
-frame-feature-testing = [] -frame-feature-testing-2 = [] +# WARNING: CI only execute pallet test with this feature, +# if the feature intended to be used outside, CI and this message need to be updated. +conditional-storage = [] # Disable ui tests disable-ui-tests = [] no-metadata-docs = ["frame-support/no-metadata-docs"] diff --git a/frame/support/test/compile_pass/src/lib.rs b/frame/support/test/compile_pass/src/lib.rs index b46f6c48a6d99..7850726048546 100644 --- a/frame/support/test/compile_pass/src/lib.rs +++ b/frame/support/test/compile_pass/src/lib.rs @@ -65,12 +65,12 @@ impl frame_system::Config for Runtime { type BlockHashCount = ConstU64<2400>; type Version = Version; type AccountData = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletInfo = PalletInfo; - type RuntimeCall = RuntimeCall; + type Call = Call; type DbWeight = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -82,7 +82,7 @@ impl frame_system::Config for Runtime { pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index 0ceeed42ff982..dd3fbd1f3020d 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -25,7 +25,7 @@ /// The configuration trait pub trait Config: 'static { /// The runtime origin type. - type RuntimeOrigin: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; + type Origin: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; /// The block number type. type BlockNumber: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; /// The information about the pallet setup in the runtime. 
@@ -36,7 +36,7 @@ pub trait Config: 'static { frame_support::decl_module! { /// Some test module - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } /// A PalletInfo implementation which just panics. diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index b1ace12936241..63747a9d560dc 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -22,10 +22,7 @@ #![recursion_limit = "128"] use codec::MaxEncodedLen; -use frame_support::{ - parameter_types, - traits::{CrateVersion, PalletInfo as _}, -}; +use frame_support::traits::{CrateVersion, PalletInfo as _}; use scale_info::TypeInfo; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -33,13 +30,14 @@ use sp_runtime::{ traits::{BlakeTwo256, Verify}, DispatchError, ModuleError, }; +use sp_std::cell::RefCell; mod system; pub trait Currency {} -parameter_types! { - pub static IntegrityTestExec: u32 = 0; +thread_local! { + pub static INTEGRITY_TEST_EXEC: RefCell = RefCell::new(0); } mod module1 { @@ -49,7 +47,7 @@ mod module1 { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::RuntimeOrigin, system=system + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -89,7 +87,7 @@ mod module2 { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::RuntimeOrigin, system=system + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -97,7 +95,7 @@ mod module2 { } fn integrity_test() { - IntegrityTestExec::mutate(|i| *i += 1); + INTEGRITY_TEST_EXEC.with(|i| *i.borrow_mut() += 1); } } } @@ -134,7 +132,7 @@ mod nested { frame_support::decl_module! 
{ pub struct Module for enum Call - where origin: ::RuntimeOrigin, system=system + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -142,7 +140,7 @@ mod nested { } fn integrity_test() { - IntegrityTestExec::mutate(|i| *i += 1); + INTEGRITY_TEST_EXEC.with(|i| *i.borrow_mut() += 1); } } } @@ -180,7 +178,7 @@ pub mod module3 { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::RuntimeOrigin, system=system + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -200,7 +198,7 @@ pub mod module3 { } #[weight = 3] fn aux_4(_origin) -> frame_support::dispatch::DispatchResult { unreachable!() } - #[weight = (5, frame_support::dispatch::DispatchClass::Operational)] + #[weight = (5, frame_support::weights::DispatchClass::Operational)] fn operational(_origin) { unreachable!() } } } @@ -247,12 +245,12 @@ fn test_pub() -> AccountId { impl system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletInfo = PalletInfo; - type RuntimeCall = RuntimeCall; + type Call = Call; type DbWeight = (); } @@ -280,106 +278,104 @@ frame_support::construct_runtime!( pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; #[test] fn check_modules_error_type() { - sp_io::TestExternalities::default().execute_with(|| { - assert_eq!( - Module1_1::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 31, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module2::fail(system::Origin::::Root.into()), - 
Err(DispatchError::Module(ModuleError { - index: 32, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 33, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - NestedModule3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 34, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 6, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_4::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 3, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_5::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 4, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_6::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 1, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_7::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 2, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_8::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 12, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_9::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 13, - error: [0; 4], - message: Some("Something") - })), - ); - }) + assert_eq!( + Module1_1::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 31, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 
32, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 33, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + NestedModule3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 34, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 6, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_4::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 3, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_5::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 4, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_6::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_7::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 2, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_8::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 12, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_9::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 13, + error: [0; 4], + message: Some("Something") + })), + ); } #[test] fn integrity_test_works() { __construct_runtime_integrity_test::runtime_integrity_tests(); - assert_eq!(IntegrityTestExec::get(), 2); + assert_eq!(INTEGRITY_TEST_EXEC.with(|i| *i.borrow()), 2); } #[test] @@ -422,53 +418,53 @@ fn event_codec() { use codec::Encode; let event = system::Event::::ExtrinsicSuccess; - 
assert_eq!(RuntimeEvent::from(event).encode()[0], 30); + assert_eq!(Event::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 31); + assert_eq!(Event::from(event).encode()[0], 31); let event = module2::Event::A; - assert_eq!(RuntimeEvent::from(event).encode()[0], 32); + assert_eq!(Event::from(event).encode()[0], 32); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 33); + assert_eq!(Event::from(event).encode()[0], 33); let event = nested::module3::Event::A; - assert_eq!(RuntimeEvent::from(event).encode()[0], 34); + assert_eq!(Event::from(event).encode()[0], 34); let event = module3::Event::A; - assert_eq!(RuntimeEvent::from(event).encode()[0], 35); + assert_eq!(Event::from(event).encode()[0], 35); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 4); + assert_eq!(Event::from(event).encode()[0], 4); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 1); + assert_eq!(Event::from(event).encode()[0], 1); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 2); + assert_eq!(Event::from(event).encode()[0], 2); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 12); + assert_eq!(Event::from(event).encode()[0], 12); let event = module1::Event::::A(test_pub()); - assert_eq!(RuntimeEvent::from(event).encode()[0], 13); + assert_eq!(Event::from(event).encode()[0], 13); } #[test] fn call_codec() { use codec::Encode; - assert_eq!(RuntimeCall::System(system::Call::noop {}).encode()[0], 30); - assert_eq!(RuntimeCall::Module1_1(module1::Call::fail {}).encode()[0], 31); - assert_eq!(RuntimeCall::Module2(module2::Call::fail {}).encode()[0], 32); - assert_eq!(RuntimeCall::Module1_2(module1::Call::fail {}).encode()[0], 33); - 
assert_eq!(RuntimeCall::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); - assert_eq!(RuntimeCall::Module3(module3::Call::fail {}).encode()[0], 35); - assert_eq!(RuntimeCall::Module1_4(module1::Call::fail {}).encode()[0], 3); - assert_eq!(RuntimeCall::Module1_6(module1::Call::fail {}).encode()[0], 1); - assert_eq!(RuntimeCall::Module1_7(module1::Call::fail {}).encode()[0], 2); - assert_eq!(RuntimeCall::Module1_8(module1::Call::fail {}).encode()[0], 12); - assert_eq!(RuntimeCall::Module1_9(module1::Call::fail {}).encode()[0], 13); + assert_eq!(Call::System(system::Call::noop {}).encode()[0], 30); + assert_eq!(Call::Module1_1(module1::Call::fail {}).encode()[0], 31); + assert_eq!(Call::Module2(module2::Call::fail {}).encode()[0], 32); + assert_eq!(Call::Module1_2(module1::Call::fail {}).encode()[0], 33); + assert_eq!(Call::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); + assert_eq!(Call::Module3(module3::Call::fail {}).encode()[0], 35); + assert_eq!(Call::Module1_4(module1::Call::fail {}).encode()[0], 3); + assert_eq!(Call::Module1_6(module1::Call::fail {}).encode()[0], 1); + assert_eq!(Call::Module1_7(module1::Call::fail {}).encode()[0], 2); + assert_eq!(Call::Module1_8(module1::Call::fail {}).encode()[0], 12); + assert_eq!(Call::Module1_9(module1::Call::fail {}).encode()[0], 13); } #[test] @@ -504,26 +500,18 @@ fn call_encode_is_correct_and_decode_works() { #[test] fn call_weight_should_attach_to_call_enum() { use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, - weights::Weight, + dispatch::{DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, Pays}, }; // operational. 
assert_eq!( module3::Call::::operational {}.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_ref_time(5), - class: DispatchClass::Operational, - pays_fee: Pays::Yes - }, + DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, ); // custom basic assert_eq!( module3::Call::::aux_4 {}.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_ref_time(3), - class: DispatchClass::Normal, - pays_fee: Pays::Yes - }, + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, ); } @@ -537,7 +525,7 @@ fn call_name() { #[test] fn call_metadata() { use frame_support::dispatch::{CallMetadata, GetCallMetadata}; - let call = RuntimeCall::Module3(module3::Call::::aux_4 {}); + let call = Call::Module3(module3::Call::::aux_4 {}); let metadata = call.get_call_metadata(); let expected = CallMetadata { function_name: "aux_4".into(), pallet_name: "Module3".into() }; assert_eq!(metadata, expected); @@ -553,7 +541,7 @@ fn get_call_names() { #[test] fn get_module_names() { use frame_support::dispatch::GetCallMetadata; - let module_names = RuntimeCall::get_module_names(); + let module_names = Call::get_module_names(); assert_eq!( [ "System", @@ -575,13 +563,13 @@ fn get_module_names() { #[test] fn call_subtype_conversion() { use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; - let call = RuntimeCall::Module3(module3::Call::::fail {}); + let call = Call::Module3(module3::Call::::fail {}); let subcall: Option<&CallableCallFor> = call.is_sub_type(); let subcall_none: Option<&CallableCallFor> = call.is_sub_type(); assert_eq!(Some(&module3::Call::::fail {}), subcall); assert_eq!(None, subcall_none); - let from = RuntimeCall::from(subcall.unwrap().clone()); + let from = Call::from(subcall.unwrap().clone()); assert_eq!(from, call); } diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index 42fd87ca95c0e..38aa780766835 100644 --- 
a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -15,17 +15,19 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::env; + #[rustversion::attr(not(stable), ignore)] #[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. - if std::env::var("RUN_UI_TESTS").is_err() { + if env::var("RUN_UI_TESTS").is_err() { return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("SKIP_WASM_BUILD", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/construct_runtime_ui/*.rs"); diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs index 7a074db9986a2..98cd1f197f619 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr index 5f1fccd43c549..608d57d6a97fc 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr @@ -1,19 +1,28 @@ error: Unexpected tokens, expected one of `=`, `,` - --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:29:43 + --> 
$DIR/both_use_and_excluded_parts.rs:29:43 | 29 | Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, | ^^^^^^^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:18:64 +error[E0412]: cannot find type `Call` in this scope + --> $DIR/both_use_and_excluded_parts.rs:18:64 + | +18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^ not found in this scope + | +help: consider importing one of these items + | +1 | use crate::pallet::Call; + | +1 | use frame_support_test::Call; + | +1 | use frame_system::Call; + | +1 | use test_pallet::Call; | -18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | - ^^^^^^^^^^^ not found in this scope - | | - | help: you might be missing a type parameter: `` error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:20:25 + --> $DIR/both_use_and_excluded_parts.rs:20:25 | 20 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs index 302b5af7ecd26..51be7e30bd3eb 100644 --- a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs @@ -20,7 +20,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr index c623ecfbf4cdf..4e31cfb75c074 100644 --- 
a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr @@ -1,19 +1,28 @@ error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. - --> tests/construct_runtime_ui/exclude_undefined_part.rs:34:34 + --> $DIR/exclude_undefined_part.rs:34:34 | 34 | Pallet: pallet exclude_parts { Call }, | ^^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/exclude_undefined_part.rs:23:64 +error[E0412]: cannot find type `Call` in this scope + --> $DIR/exclude_undefined_part.rs:23:64 + | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^ not found in this scope + | +help: consider importing one of these items + | +1 | use crate::pallet::Call; + | +1 | use frame_support_test::Call; + | +1 | use frame_system::Call; + | +1 | use test_pallet::Call; | -23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | - ^^^^^^^^^^^ not found in this scope - | | - | help: you might be missing a type parameter: `` error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/exclude_undefined_part.rs:25:25 + --> $DIR/exclude_undefined_part.rs:25:25 | 25 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs deleted file mode 100644 index 79b5632babd95..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs +++ /dev/null @@ -1,14 +0,0 @@ -use frame_support::construct_runtime; - -construct_runtime! 
{ - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - #[cfg(test)] - System: frame_system::{Pallet, Call, Storage, Config, Event}, - } -} - -fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr deleted file mode 100644 index a86a839615aa0..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: `System` pallet declaration is feature gated, please remove any `#[cfg]` attributes - --> tests/construct_runtime_ui/feature_gated_system_pallet.rs:10:3 - | -10 | System: frame_system::{Pallet, Call, Storage, Config, Event}, - | ^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs deleted file mode 100644 index a1d39fa76ea85..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs +++ /dev/null @@ -1,15 +0,0 @@ -use frame_support::construct_runtime; - -construct_runtime! 
{ - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: system::{Pallet}, - #[cfg(feature = 1)] - Balance: balances::{Config, Call}, - } -} - -fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr deleted file mode 100644 index 68366a3410bf1..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr +++ /dev/null @@ -1,6 +0,0 @@ -error: feature = 1 - ^ expected one of ``, `all`, `any`, `not` here - --> tests/construct_runtime_ui/invalid_meta_literal.rs:10:3 - | -10 | #[cfg(feature = 1)] - | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs index 2ca9676406579..c06333795e3c5 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs @@ -6,22 +6,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl test_pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff 
--git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index d35565fb933ac..1f08ab87c1f79 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -10,7 +10,7 @@ error: `Pallet` does not have the std feature enabled, this will cause the `test 49 | | } | |_^ | - = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in crate `test_pallet` --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 @@ -29,6 +29,19 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | +help: if you import `GenesisConfig`, refer to it directly + | +40 - construct_runtime! 
{ +41 - pub enum Runtime where +42 - Block = Block, +43 - NodeBlock = Block, +44 - UncheckedExtrinsic = UncheckedExtrinsic +45 - { +46 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +47 - Pallet: test_pallet::{Pallet, Config}, +48 - } +49 - } + | error[E0283]: type annotations needed --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 diff --git a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs index 5691549c20f34..706d444f23590 100644 --- a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs +++ b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs @@ -8,7 +8,7 @@ mod pallet_old { } decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin {} + pub struct Module for enum Call where origin: T::Origin {} } } diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs index b7ccadb5e3e58..827d8a58af733 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs @@ -40,22 +40,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = 
sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr index 99a543eef7a8a..161873866b6f3 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr @@ -10,4 +10,4 @@ error[E0080]: evaluation of constant value failed 83 | | } | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:74:1 | - = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::panic::panic_2021` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs index abe5c4cfeb343..1653e830f0b4f 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -15,22 +15,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = 
RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index 6baf01e866f64..c162a22bb87b0 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,4 +13,4 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs index 9a6ac5c6251fb..b8f91cf4bc690 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -15,22 +15,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; 
impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index ff8ecf3041bf6..2af4d3fb15000 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::event] defined, perhaps you should remov 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Event` in module `pallet` --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 @@ -32,3 +32,49 @@ help: consider importing this enum | 1 | use frame_system::Event; | +help: if you import `Event`, refer to it directly + | +49 - construct_runtime! 
{ +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Event}, +57 - } +58 - } + | + +error[E0412]: cannot find type `Event` in module `pallet` + --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 + | +49 | / construct_runtime! { +50 | | pub enum Runtime where +51 | | Block = Block, +52 | | NodeBlock = Block, +... | +57 | | } +58 | | } + | |_^ not found in `pallet` + | + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::Event; + | +1 | use frame_system::Event; + | +help: if you import `Event`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Event}, +57 - } +58 - } + | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs index 4facf85e280c0..a61d545b3279e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -15,22 +15,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index 
= u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 046369e1112b0..1bc109a45ac57 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you sho 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in module `pallet` --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 @@ -32,6 +32,19 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | +help: if you import `GenesisConfig`, refer to it directly + | +49 - construct_runtime! 
{ +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Config}, +57 - } +58 - } + | error[E0283]: type annotations needed --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs index 322fa2c297285..6e4764286ab41 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -15,22 +15,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 74af0c264cd5e..9f646469d86a8 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ 
b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -13,13 +13,13 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- function or associated item `create_inherent` not found for this struct + | ------------------------ function or associated item `create_inherent` not found for this ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -39,7 +39,7 @@ error[E0599]: no function or associated item named `is_inherent` found for struc --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- function or associated item `is_inherent` not found for this struct + | ------------------------ function or associated item `is_inherent` not found for this ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -59,7 +59,7 @@ error[E0599]: no function or associated item named `check_inherent` found for st --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- function or associated item `check_inherent` not found for this struct + | ------------------------ function or associated item `check_inherent` not found for this ... 49 | / construct_runtime! 
{ 50 | | pub enum Runtime where @@ -79,7 +79,7 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- associated item `INHERENT_IDENTIFIER` not found for this struct + | ------------------------ associated item `INHERENT_IDENTIFIER` not found for this ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -99,7 +99,7 @@ error[E0599]: no function or associated item named `is_inherent_required` found --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- function or associated item `is_inherent_required` not found for this struct + | ------------------------ function or associated item `is_inherent_required` not found for this ... 49 | / construct_runtime! { 50 | | pub enum Runtime where diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs index 55cd5b545d6ba..9233404a865b9 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -15,22 +15,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type 
Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 4907053b12877..c692cd61bae8b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remo 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Origin` in module `pallet` --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 @@ -32,8 +32,21 @@ help: consider importing this type alias | 1 | use frame_system::Origin; | +help: if you import `Origin`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Origin}, +57 - } +58 - } + | -error[E0282]: type annotations needed +error[E0412]: cannot find type `Origin` in module `pallet` --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 | 49 | / construct_runtime! { @@ -43,10 +56,39 @@ error[E0282]: type annotations needed ... 
| 57 | | } 58 | | } - | |_^ cannot infer type of the type parameter `AccountId` declared on the enum `RawOrigin` + | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider specifying the generic argument +help: consider importing one of these items + | +1 | use crate::Origin; | -58 | }:: - | +++++++++++++ +1 | use frame_system::Origin; + | +help: if you import `Origin`, refer to it directly + | +49 - construct_runtime! { +50 - pub enum Runtime where +51 - Block = Block, +52 - NodeBlock = Block, +53 - UncheckedExtrinsic = UncheckedExtrinsic +54 - { +55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, +56 - Pallet: pallet::{Pallet, Origin}, +57 - } +58 - } + | + +error[E0282]: type annotations needed + --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 + | +49 | / construct_runtime! { +50 | | pub enum Runtime where +51 | | Block = Block, +52 | | NodeBlock = Block, +... 
| +57 | | } +58 | | } + | |_^ cannot infer type for type parameter `AccountId` declared on the enum `RawOrigin` + | + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs index 0cf305a7dc055..621683aca3754 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -15,22 +15,22 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index 6f0b13c58933e..94226075d9a4b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,27 +13,22 @@ error: 
`Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope +error[E0599]: no variant or associated item named `Pallet` found for enum `Call` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:56:3 | -49 | / construct_runtime! { -50 | | pub enum Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -56 | | Pallet: pallet::{Pallet, ValidateUnsigned}, - | | ^^^^^^ variant or associated item not found in `RuntimeCall` -57 | | } -58 | | } - | |_- variant or associated item `Pallet` not found for this enum +49 | construct_runtime! { + | ------------------ variant or associated item `Pallet` not found here +... +56 | Pallet: pallet::{Pallet, ValidateUnsigned}, + | ^^^^^^ variant or associated item not found in `Call` error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- function or associated item `pre_dispatch` not found for this struct + | ------------------------ function or associated item `pre_dispatch` not found for this ... 49 | / construct_runtime! 
{ 50 | | pub enum Runtime where @@ -54,7 +49,7 @@ error[E0599]: no function or associated item named `validate_unsigned` found for --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); - | -------------------- function or associated item `validate_unsigned` not found for this struct + | ------------------------ function or associated item `validate_unsigned` not found for this ... 49 | / construct_runtime! { 50 | | pub enum Runtime where diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs deleted file mode 100644 index b93adf9a780a7..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs +++ /dev/null @@ -1,15 +0,0 @@ -use frame_support::construct_runtime; - -construct_runtime! { - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: system::{Pallet}, - #[cfg(feature(test))] - Balance: balances::{Config, Call}, - } -} - -fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr deleted file mode 100644 index 98d99a0d34997..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr +++ /dev/null @@ -1,6 +0,0 @@ -error: feature(test) - ^ expected one of `=`, `,`, `)` here - --> tests/construct_runtime_ui/unsupported_meta_structure.rs:10:3 - | -10 | #[cfg(feature(test))] - | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs deleted file mode 100644 index 3ec8b9db1d435..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs +++ /dev/null @@ -1,15 +0,0 @@ -use 
frame_support::construct_runtime; - -construct_runtime! { - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: system::{Pallet}, - #[attr] - Balance: balances::{Config, Call}, - } -} - -fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr deleted file mode 100644 index fceb2b8a99db8..0000000000000 --- a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Unsupported attribute, only #[cfg] is supported on pallet declarations in `construct_runtime` - --> tests/construct_runtime_ui/unsupported_pallet_attr.rs:10:3 - | -10 | #[attr] - | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs index c74e29bc05469..1664dcc42b755 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs @@ -20,7 +20,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr index e289c75fb008a..ed41f0ce673a4 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr @@ -1,19 +1,28 @@ error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. 
- --> tests/construct_runtime_ui/use_undefined_part.rs:34:30 + --> $DIR/use_undefined_part.rs:34:30 | 34 | Pallet: pallet use_parts { Call }, | ^^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/use_undefined_part.rs:23:64 +error[E0412]: cannot find type `Call` in this scope + --> $DIR/use_undefined_part.rs:23:64 + | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^ not found in this scope + | +help: consider importing one of these items + | +1 | use crate::pallet::Call; + | +1 | use frame_support_test::Call; + | +1 | use frame_system::Call; + | +1 | use test_pallet::Call; | -23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | - ^^^^^^^^^^^ not found in this scope - | | - | help: you might be missing a type parameter: `` error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/use_undefined_part.rs:25:25 + --> $DIR/use_undefined_part.rs:25:25 | 25 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs index 0f662b96e2b13..cc7c1ff219d8b 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -1,5 +1,5 @@ frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn integrity_test() {} fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 4212707599d41..880695d9b77e2 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -2,7 +2,7 @@ error: `integrity_test` can only be passed once as input. --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} @@ -10,7 +10,7 @@ error: `integrity_test` can only be passed once as input. 
7 | | } | |_^ | - = note: this error originates in the macro `$crate::decl_module` which comes from the expansion of the macro `frame_support::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:7:2 diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index ea0746b1c501c..18aaec12c5f39 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn on_initialize() -> Weight { 0 } diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 94bde853e4cc8..369be77b8d249 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -1,8 +1,8 @@ error: `on_initialize` can only be passed once as input. - --> tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs:1:1 + --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... 
| @@ -10,4 +10,4 @@ error: `on_initialize` can only be passed once as input. 11 | | } | |_^ | - = note: this error originates in the macro `$crate::decl_module` which comes from the expansion of the macro `frame_support::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index a4e420eb454b6..7ce43cd5d44d1 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -23,7 +23,7 @@ mod tests { use sp_io::TestExternalities; frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } pub trait Config: frame_support_test::Config { @@ -94,7 +94,7 @@ mod tests { struct TraitImpl {} impl frame_support_test::Config for TraitImpl { - type RuntimeOrigin = u32; + type Origin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); @@ -617,7 +617,7 @@ mod test2 { pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } type PairOf = (T, T); @@ -639,7 +639,7 @@ mod test2 { struct TraitImpl {} impl frame_support_test::Config for TraitImpl { - type RuntimeOrigin = u32; + type Origin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); @@ -695,7 +695,7 @@ mod test3 { pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { trait Store for Module as Test { @@ -708,7 +708,7 @@ mod test3 { struct TraitImpl {} impl frame_support_test::Config for TraitImpl { - type RuntimeOrigin = u32; + type Origin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); @@ -726,7 +726,7 @@ mod test_append_and_len { pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } #[derive(PartialEq, Eq, Clone, Encode, Decode, scale_info::TypeInfo)] @@ -753,7 +753,7 @@ mod test_append_and_len { struct Test {} impl frame_support_test::Config for Test { - type RuntimeOrigin = u32; + type Origin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs index af74823f64ca6..db2cdbdc65492 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs index 8805effd9e725..b804bf8980383 100644 --- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs index 5e05887110537..bc03ff6b4a4f9 100644 --- a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/frame/support/test/tests/derive_no_bound_ui/debug.stderr index acc7f80b37663..7580cab2ea0b3 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -1,8 +1,8 @@ error[E0277]: `::C` doesn't implement `std::fmt::Debug` - --> tests/derive_no_bound_ui/debug.rs:7:2 + --> $DIR/debug.rs:7:2 | 7 | c: T::C, | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::C` - = note: required for the cast from `::C` to the object type `dyn std::fmt::Debug` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index a7ad88c801001..c1723c6ad7a1b 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -28,7 +28,7 @@ mod no_instance { pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { @@ -53,7 +53,7 @@ mod instance { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> - for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! 
{ diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index f0094b3188ae1..367c7236d0158 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { @@ -31,7 +31,7 @@ struct Test; impl frame_support_test::Config for Test { type BlockNumber = u32; - type RuntimeOrigin = (); + type Origin = (); type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 043959b67ee6e..75a96f628245a 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -50,15 +50,15 @@ mod module1 { where ::BlockNumber: From, { - type RuntimeEvent: From> + Into<::RuntimeEvent>; - type RuntimeOrigin: From>; + type Event: From> + Into<::Event>; + type Origin: From>; type SomeParameter: Get; type GenericType: Default + Clone + Codec + EncodeLike + TypeInfo; } frame_support::decl_module! { pub struct Module, I: Instance> for enum Call where - origin: ::RuntimeOrigin, + origin: ::Origin, system = system, T::BlockNumber: From { @@ -154,15 +154,15 @@ mod module2 { pub trait Config: system::Config { type Amount: Parameter + Default; - type RuntimeEvent: From> + Into<::RuntimeEvent>; - type RuntimeOrigin: From>; + type Event: From> + Into<::Event>; + type Origin: From>; } impl, I: Instance> Currency for Module {} frame_support::decl_module! 
{ pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::RuntimeOrigin, + origin: ::Origin, system = system { fn deposit_event() = default; @@ -228,41 +228,41 @@ mod module3 { } frame_support::decl_module! { - pub struct Module for enum Call where origin: ::RuntimeOrigin, system=system {} + pub struct Module for enum Call where origin: ::Origin, system=system {} } } impl module1::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; type SomeParameter = ConstU32<100>; type GenericType = u32; } impl module1::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; type SomeParameter = ConstU32<100>; type GenericType = u32; } impl module2::Config for Runtime { type Amount = u16; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; } impl module2::Config for Runtime { type Amount = u32; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; } impl module2::Config for Runtime { type Amount = u32; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; } impl module2::Config for Runtime { type Amount = u64; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; + type Event = Event; + type Origin = Origin; } impl module3::Config for Runtime { type Currency = Module2_2; @@ -277,12 +277,12 @@ pub type Index = u64; impl system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletInfo = PalletInfo; - type RuntimeCall = RuntimeCall; + type 
Call = Call; type DbWeight = (); } @@ -315,7 +315,7 @@ frame_support::construct_runtime!( pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; fn new_test_ext() -> sp_io::TestExternalities { GenesisConfig { diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 5d24d54165c1f..d7e3d2cb5b135 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -86,7 +86,7 @@ mod module { pub trait Config: system::Config + TypeInfo {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] @@ -155,17 +155,17 @@ pub type BlockNumber = u64; pub type Index = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletInfo = PalletInfo; - type RuntimeCall = RuntimeCall; + type Call = Call; type DbWeight = (); } diff --git a/frame/support/test/tests/origin.rs b/frame/support/test/tests/origin.rs index f794cd8be1ce0..cff531ff2e529 100644 --- a/frame/support/test/tests/origin.rs +++ b/frame/support/test/tests/origin.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! RuntimeOrigin tests for construct_runtime macro +//! 
Origin tests for construct_runtime macro #![recursion_limit = "128"] @@ -38,7 +38,7 @@ mod nested { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::RuntimeOrigin, system=system + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -80,7 +80,7 @@ pub mod module { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::RuntimeOrigin, system=system + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -100,7 +100,7 @@ pub mod module { } #[weight = 3] fn aux_4(_origin) -> frame_support::dispatch::DispatchResult { unreachable!() } - #[weight = (5, frame_support::dispatch::DispatchClass::Operational)] + #[weight = (5, frame_support::weights::DispatchClass::Operational)] fn operational(_origin) { unreachable!() } } } @@ -134,10 +134,10 @@ impl nested::module::Config for RuntimeOriginTest {} impl module::Config for RuntimeOriginTest {} pub struct BaseCallFilter; -impl Contains for BaseCallFilter { - fn contains(c: &RuntimeCall) -> bool { +impl Contains for BaseCallFilter { + fn contains(c: &Call) -> bool { match c { - RuntimeCall::NestedModule(_) => true, + Call::NestedModule(_) => true, _ => false, } } @@ -146,12 +146,12 @@ impl Contains for BaseCallFilter { impl system::Config for RuntimeOriginTest { type BaseCallFilter = BaseCallFilter; type Hash = H256; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = u32; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type PalletInfo = PalletInfo; - type RuntimeCall = RuntimeCall; + type Call = Call; type DbWeight = (); } @@ -170,7 +170,7 @@ frame_support::construct_runtime!( pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = 
generic::UncheckedExtrinsic; pub type Block = generic::Block; #[test] @@ -178,40 +178,40 @@ fn origin_default_filter() { let accepted_call = nested::module::Call::fail {}.into(); let rejected_call = module::Call::fail {}.into(); - assert_eq!(RuntimeOrigin::root().filter_call(&accepted_call), true); - assert_eq!(RuntimeOrigin::root().filter_call(&rejected_call), true); - assert_eq!(RuntimeOrigin::none().filter_call(&accepted_call), true); - assert_eq!(RuntimeOrigin::none().filter_call(&rejected_call), false); - assert_eq!(RuntimeOrigin::signed(0).filter_call(&accepted_call), true); - assert_eq!(RuntimeOrigin::signed(0).filter_call(&rejected_call), false); - assert_eq!(RuntimeOrigin::from(Some(0)).filter_call(&accepted_call), true); - assert_eq!(RuntimeOrigin::from(Some(0)).filter_call(&rejected_call), false); - assert_eq!(RuntimeOrigin::from(None).filter_call(&accepted_call), true); - assert_eq!(RuntimeOrigin::from(None).filter_call(&rejected_call), false); - assert_eq!(RuntimeOrigin::from(nested::module::Origin).filter_call(&accepted_call), true); - assert_eq!(RuntimeOrigin::from(nested::module::Origin).filter_call(&rejected_call), false); - - let mut origin = RuntimeOrigin::from(Some(0)); - origin.add_filter(|c| matches!(c, RuntimeCall::Module(_))); + assert_eq!(Origin::root().filter_call(&accepted_call), true); + assert_eq!(Origin::root().filter_call(&rejected_call), true); + assert_eq!(Origin::none().filter_call(&accepted_call), true); + assert_eq!(Origin::none().filter_call(&rejected_call), false); + assert_eq!(Origin::signed(0).filter_call(&accepted_call), true); + assert_eq!(Origin::signed(0).filter_call(&rejected_call), false); + assert_eq!(Origin::from(Some(0)).filter_call(&accepted_call), true); + assert_eq!(Origin::from(Some(0)).filter_call(&rejected_call), false); + assert_eq!(Origin::from(None).filter_call(&accepted_call), true); + assert_eq!(Origin::from(None).filter_call(&rejected_call), false); + 
assert_eq!(Origin::from(nested::module::Origin).filter_call(&accepted_call), true); + assert_eq!(Origin::from(nested::module::Origin).filter_call(&rejected_call), false); + + let mut origin = Origin::from(Some(0)); + origin.add_filter(|c| matches!(c, Call::Module(_))); assert_eq!(origin.filter_call(&accepted_call), false); assert_eq!(origin.filter_call(&rejected_call), false); // Now test for root origin and filters: - let mut origin = RuntimeOrigin::from(Some(0)); - origin.set_caller_from(RuntimeOrigin::root()); + let mut origin = Origin::from(Some(0)); + origin.set_caller_from(Origin::root()); assert!(matches!(origin.caller, OriginCaller::system(system::RawOrigin::Root))); // Root origin bypass all filter. assert_eq!(origin.filter_call(&accepted_call), true); assert_eq!(origin.filter_call(&rejected_call), true); - origin.set_caller_from(RuntimeOrigin::from(Some(0))); + origin.set_caller_from(Origin::from(Some(0))); // Back to another signed origin, the filtered are now effective again assert_eq!(origin.filter_call(&accepted_call), true); assert_eq!(origin.filter_call(&rejected_call), false); - origin.set_caller_from(RuntimeOrigin::root()); + origin.set_caller_from(Origin::root()); origin.reset_filter(); // Root origin bypass all filter, even when they are reset. diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 0fd32dad2242a..6b72327eb4989 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -16,16 +16,14 @@ // limitations under the License. 
use frame_support::{ - dispatch::{ - DispatchClass, DispatchInfo, GetDispatchInfo, Parameter, Pays, UnfilteredDispatchable, - }, + dispatch::{Parameter, UnfilteredDispatchable}, pallet_prelude::ValueQuery, storage::unhashed, traits::{ ConstU32, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, }, - weights::{RuntimeDbWeight, Weight}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight}, }; use scale_info::{meta_type, TypeInfo}; use sp_io::{ @@ -127,7 +125,7 @@ pub mod pallet { type Balance: Parameter + Default + TypeInfo; - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::extra_constants] @@ -167,7 +165,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(10)); - Weight::from_ref_time(10) + 10 } fn on_finalize(_: BlockNumberFor) { let _ = T::AccountId::from(SomeType1); // Test for where clause @@ -178,7 +176,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(30)); - Weight::from_ref_time(30) + 30 } fn integrity_test() { let _ = T::AccountId::from(SomeType1); // Test for where clause @@ -192,7 +190,7 @@ pub mod pallet { T::AccountId: From + From + SomeAssociation1, { /// Doc comment put in metadata - #[pallet::weight(Weight::from_ref_time(*_foo as u64))] + #[pallet::weight(Weight::from(*_foo))] pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, @@ -231,7 +229,6 @@ pub mod pallet { pub enum Error { /// doc comment put into metadata InsufficientProposersBalance, - NonExistentStorageValue, Code(u8), #[codec(skip)] Skipped(u128), @@ -285,10 +282,6 @@ pub mod pallet { pub type Map2 = StorageMap>; - #[pallet::storage] - pub type Map3 = 
- StorageMap<_, Blake2_128Concat, u32, u64, ResultQuery::NonExistentStorageValue>>; - #[pallet::storage] pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; @@ -302,17 +295,6 @@ pub mod pallet { MaxValues = ConstU32<5>, >; - #[pallet::storage] - pub type DoubleMap3 = StorageDoubleMap< - _, - Blake2_128Concat, - u32, - Twox64Concat, - u64, - u128, - ResultQuery::NonExistentStorageValue>, - >; - #[pallet::storage] #[pallet::getter(fn nmap)] pub type NMap = StorageNMap<_, storage::Key, u32>; @@ -325,33 +307,24 @@ pub mod pallet { MaxValues = ConstU32<11>, >; - #[pallet::storage] - #[pallet::getter(fn nmap3)] - pub type NMap3 = StorageNMap< - _, - (NMapKey, NMapKey), - u128, - ResultQuery::NonExistentStorageValue>, - >; - #[pallet::storage] #[pallet::getter(fn conditional_value)] - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] pub type ConditionalValue = StorageValue<_, u32>; - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] #[pallet::storage] #[pallet::getter(fn conditional_map)] pub type ConditionalMap = StorageMap<_, Twox64Concat, u16, u32, OptionQuery, GetDefault, ConstU32<12>>; - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] #[pallet::storage] #[pallet::getter(fn conditional_double_map)] pub type ConditionalDoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] #[pallet::storage] #[pallet::getter(fn conditional_nmap)] pub type ConditionalNMap = @@ -479,7 +452,7 @@ pub mod pallet2 { where ::AccountId: From + SomeAssociation1, { - type RuntimeEvent: From + IsType<::RuntimeEvent>; + type Event: From + IsType<::Event>; } #[pallet::pallet] @@ -494,14 +467,14 @@ pub mod pallet2 { { fn on_initialize(_: BlockNumberFor) -> Weight { Self::deposit_event(Event::Something(11)); - Weight::zero() + 0 } fn on_finalize(_: 
BlockNumberFor) { Self::deposit_event(Event::Something(21)); } fn on_runtime_upgrade() -> Weight { Self::deposit_event(Event::Something(31)); - Weight::zero() + 0 } } @@ -552,11 +525,7 @@ pub mod pallet2 { #[frame_support::pallet] pub mod pallet3 { #[pallet::config] - pub trait Config: - frame_system::Config::RuntimeOrigin> - { - type RuntimeOrigin; - } + pub trait Config: frame_system::Config {} #[pallet::pallet] pub struct Pallet(_); @@ -574,36 +543,22 @@ pub mod pallet4 { impl Pallet {} } -/// Test that the supertrait check works when we pass some parameter to the `frame_system::Config`. -#[frame_support::pallet] -pub mod pallet5 { - #[pallet::config] - pub trait Config: - frame_system::Config::RuntimeOrigin> - { - type RuntimeOrigin; - } - - #[pallet::pallet] - pub struct Pallet(_); -} - frame_support::parameter_types!( pub const MyGetParam3: u32 = 12; ); impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); @@ -619,7 +574,7 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type MyGetParam = ConstU32<10>; type MyGetParam2 = ConstU32<11>; type MyGetParam3 = MyGetParam3; @@ -627,24 +582,14 @@ impl pallet::Config for Runtime { } impl pallet2::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } impl pallet4::Config for Runtime {} -#[cfg(feature = "frame-feature-testing")] -impl pallet3::Config for 
Runtime { - type RuntimeOrigin = RuntimeOrigin; -} - -#[cfg(feature = "frame-feature-testing-2")] -impl pallet5::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; -} - pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where @@ -656,19 +601,14 @@ frame_support::construct_runtime!( System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, Example2: pallet2 exclude_parts { Call }, - #[cfg(feature = "frame-feature-testing")] - Example3: pallet3, Example4: pallet4 use_parts { Call }, - - #[cfg(feature = "frame-feature-testing-2")] - Example5: pallet5, } ); -// Test that the part `RuntimeCall` is excluded from Example2 and included in Example4. -fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { +// Test that the part `Call` is excluded from Example2 and included in Example4. 
+fn _ensure_call_is_correctly_excluded_and_included(call: Call) { match call { - RuntimeCall::System(_) | RuntimeCall::Example(_) | RuntimeCall::Example4(_) => (), + Call::System(_) | Call::Example(_) | Call::Example4(_) => (), } } @@ -691,7 +631,7 @@ fn transactional_works() { .iter() .map(|e| &e.event) .collect::>(), - vec![&RuntimeEvent::Example(pallet::Event::Something(0))], + vec![&Event::Example(pallet::Event::Something(0))], ); }) } @@ -701,11 +641,7 @@ fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3, bar: 0 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { - weight: frame_support::weights::Weight::from_ref_time(3), - class: DispatchClass::Normal, - pays_fee: Pays::Yes - } + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -756,7 +692,7 @@ fn inherent_expand() { let inherents = InherentData::new().create_extrinsics(); let expected = vec![UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + function: Call::Example(pallet::Call::foo_no_post_info {}), signature: None, }]; assert_eq!(expected, inherents); @@ -771,11 +707,11 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + function: Call::Example(pallet::Call::foo_no_post_info {}), signature: None, }, UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), + function: Call::Example(pallet::Call::foo { foo: 1, bar: 0 }), signature: None, }, ], @@ -793,11 +729,11 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + function: Call::Example(pallet::Call::foo_no_post_info {}), signature: None, }, UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo { foo: 0, bar: 0 }), + function: Call::Example(pallet::Call::foo { foo: 0, bar: 0 }), signature: None, }, ], @@ 
-814,7 +750,7 @@ fn inherent_expand() { Digest::default(), ), vec![UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), + function: Call::Example(pallet::Call::foo_storage_layer { foo: 0 }), signature: None, }], ); @@ -832,7 +768,7 @@ fn inherent_expand() { Digest::default(), ), vec![UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + function: Call::Example(pallet::Call::foo_no_post_info {}), signature: Some((1, (), ())), }], ); @@ -851,11 +787,11 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), + function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), signature: None, }, UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), + function: Call::Example(pallet::Call::foo_storage_layer { foo: 0 }), signature: None, }, ], @@ -873,15 +809,15 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), + function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), signature: None, }, UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), + function: Call::Example(pallet::Call::foo_storage_layer { foo: 0 }), signature: None, }, UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + function: Call::Example(pallet::Call::foo_no_post_info {}), signature: None, }, ], @@ -899,15 +835,15 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), + function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), signature: None, }, UncheckedExtrinsic { - function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), + function: Call::Example(pallet::Call::foo { foo: 1, bar: 0 }), signature: Some((1, (), ())), }, UncheckedExtrinsic { - function: 
RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + function: Call::Example(pallet::Call::foo_no_post_info {}), signature: None, }, ], @@ -951,7 +887,7 @@ fn pallet_expand_deposit_event() { .unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example(pallet::Event::Something(3)), + Event::Example(pallet::Event::Something(3)), ); }) } @@ -998,16 +934,6 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); - pallet::Map3::::insert(1, 2); - let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(2u64)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - pallet::Map3::::get(2), - Err(pallet::Error::::NonExistentStorageValue), - ); - pallet::DoubleMap::::insert(&1, &2, &3); let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -1022,17 +948,6 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); - pallet::DoubleMap3::::insert(&1, &2, &3); - let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap3")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - k.extend(2u64.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u128)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - pallet::DoubleMap3::::get(2, 3), - Err(pallet::Error::::NonExistentStorageValue), - ); - pallet::NMap::::insert((&1,), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -1046,18 +961,7 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); - pallet::NMap3::::insert((&1, &2), &3); - let mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); - k.extend(1u8.using_encoded(blake2_128_concat)); - 
k.extend(2u16.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u128)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - pallet::NMap3::::get((2, 3)), - Err(pallet::Error::::NonExistentStorageValue), - ); - - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] { pallet::ConditionalValue::::put(1); pallet::ConditionalMap::::insert(1, 2); @@ -1083,34 +987,34 @@ fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllPalletsWithoutSystem::on_initialize(1), Weight::from_ref_time(10)); + assert_eq!(AllPalletsWithoutSystem::on_initialize(1), 10); AllPalletsWithoutSystem::on_finalize(1); - assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), Weight::from_ref_time(30)); + assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), 30); assert_eq!( frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example(pallet::Event::Something(10)), + Event::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - RuntimeEvent::Example2(pallet2::Event::Something(11)), + Event::Example2(pallet2::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - RuntimeEvent::Example(pallet::Event::Something(20)), + Event::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - RuntimeEvent::Example2(pallet2::Event::Something(21)), + Event::Example2(pallet2::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - RuntimeEvent::Example(pallet::Event::Something(30)), + Event::Example(pallet::Event::Something(30)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - RuntimeEvent::Example2(pallet2::Event::Something(31)), + Event::Example2(pallet2::Event::Something(31)), ); }) } @@ -1122,41 +1026,35 @@ fn all_pallets_type_reversed_order_is_correct() { #[allow(deprecated)] { - assert_eq!( - 
AllPalletsWithoutSystemReversed::on_initialize(1), - Weight::from_ref_time(10) - ); + assert_eq!(AllPalletsWithoutSystemReversed::on_initialize(1), 10); AllPalletsWithoutSystemReversed::on_finalize(1); - assert_eq!( - AllPalletsWithoutSystemReversed::on_runtime_upgrade(), - Weight::from_ref_time(30) - ); + assert_eq!(AllPalletsWithoutSystemReversed::on_runtime_upgrade(), 30); } assert_eq!( frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example2(pallet2::Event::Something(11)), + Event::Example2(pallet2::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - RuntimeEvent::Example(pallet::Event::Something(10)), + Event::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - RuntimeEvent::Example2(pallet2::Event::Something(21)), + Event::Example2(pallet2::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - RuntimeEvent::Example(pallet::Event::Something(20)), + Event::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - RuntimeEvent::Example2(pallet2::Event::Something(31)), + Event::Example2(pallet2::Event::Something(31)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - RuntimeEvent::Example(pallet::Event::Something(30)), + Event::Example(pallet::Event::Something(30)), ); }) } @@ -1197,16 +1095,8 @@ fn migrate_from_pallet_version_to_storage_version() { AllPalletsWithSystem, >(&db_weight); - let mut pallet_num = 4; - if cfg!(feature = "frame-feature-testing") { - pallet_num += 1; - }; - if cfg!(feature = "frame-feature-testing-2") { - pallet_num += 1; - }; - - // `pallet_num` pallets, 2 writes and every write costs 5 weight. - assert_eq!(Weight::from_ref_time(pallet_num * 2 * 5), weight); + // 4 pallets, 2 writes and every write costs 5 weight. 
+ assert_eq!(4 * 2 * 5, weight); // All pallet versions should be removed assert!(sp_io::storage::get(&pallet_version_key(Example::name())).is_none()); @@ -1281,17 +1171,6 @@ fn metadata() { default: vec![0], docs: vec![], }, - StorageEntryMetadata { - name: "Map3", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], - }, - default: vec![1, 1], - docs: vec![], - }, StorageEntryMetadata { name: "DoubleMap", modifier: StorageEntryModifier::Optional, @@ -1320,20 +1199,6 @@ fn metadata() { default: vec![0], docs: vec![], }, - StorageEntryMetadata { - name: "DoubleMap3", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u32, u64)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - }, - default: vec![1, 1], - docs: vec![], - }, StorageEntryMetadata { name: "NMap", modifier: StorageEntryModifier::Optional, @@ -1359,21 +1224,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - StorageEntryMetadata { - name: "NMap3", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - value: meta_type::(), - }, - default: vec![1, 1], - docs: vec![], - }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] StorageEntryMetadata { name: "ConditionalValue", modifier: StorageEntryModifier::Optional, @@ -1381,7 +1232,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] StorageEntryMetadata { name: "ConditionalMap", modifier: StorageEntryModifier::Optional, @@ -1393,7 +1244,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = 
"conditional-storage")] StorageEntryMetadata { name: "ConditionalDoubleMap", modifier: StorageEntryModifier::Optional, @@ -1408,7 +1259,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] StorageEntryMetadata { name: "ConditionalNMap", modifier: StorageEntryModifier::Optional, @@ -1530,26 +1381,6 @@ fn metadata() { constants: vec![], error: None, }, - #[cfg(feature = "frame-feature-testing")] - PalletMetadata { - index: 3, - name: "Example3", - storage: None, - calls: None, - event: None, - constants: vec![], - error: None, - }, - #[cfg(feature = "frame-feature-testing-2")] - PalletMetadata { - index: 5, - name: "Example5", - storage: None, - calls: None, - event: None, - constants: vec![], - error: None, - }, ]; let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() && @@ -1605,8 +1436,6 @@ fn test_storage_info() { traits::{StorageInfo, StorageInfoTrait}, }; - // Storage max size is calculated by adding up all the hasher size, the key type size and the - // value type size assert_eq!( Example::storage_info(), vec![ @@ -1636,65 +1465,44 @@ fn test_storage_info() { storage_name: b"Map".to_vec(), prefix: prefix(b"Example", b"Map").to_vec(), max_values: None, - max_size: Some(16 + 1 + 2), + max_size: Some(3 + 16), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"Map2".to_vec(), prefix: prefix(b"Example", b"Map2").to_vec(), max_values: Some(3), - max_size: Some(8 + 2 + 4), - }, - StorageInfo { - pallet_name: b"Example".to_vec(), - storage_name: b"Map3".to_vec(), - prefix: prefix(b"Example", b"Map3").to_vec(), - max_values: None, - max_size: Some(16 + 4 + 8), + max_size: Some(6 + 8), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"DoubleMap".to_vec(), prefix: prefix(b"Example", b"DoubleMap").to_vec(), max_values: None, - max_size: Some(16 + 1 + 8 + 2 + 4), + max_size: Some(7 + 16 + 8), }, StorageInfo { pallet_name: 
b"Example".to_vec(), storage_name: b"DoubleMap2".to_vec(), prefix: prefix(b"Example", b"DoubleMap2").to_vec(), max_values: Some(5), - max_size: Some(8 + 2 + 16 + 4 + 8), - }, - StorageInfo { - pallet_name: b"Example".to_vec(), - storage_name: b"DoubleMap3".to_vec(), - prefix: prefix(b"Example", b"DoubleMap3").to_vec(), - max_values: None, - max_size: Some(16 + 4 + 8 + 8 + 16), + max_size: Some(14 + 8 + 16), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"NMap".to_vec(), prefix: prefix(b"Example", b"NMap").to_vec(), max_values: None, - max_size: Some(16 + 1 + 4), + max_size: Some(5 + 16), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"NMap2".to_vec(), prefix: prefix(b"Example", b"NMap2").to_vec(), max_values: Some(11), - max_size: Some(8 + 2 + 16 + 4 + 8), + max_size: Some(14 + 8 + 16), }, - StorageInfo { - pallet_name: b"Example".to_vec(), - storage_name: b"NMap3".to_vec(), - prefix: prefix(b"Example", b"NMap3").to_vec(), - max_values: None, - max_size: Some(16 + 1 + 8 + 2 + 16), - }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] { StorageInfo { pallet_name: b"Example".to_vec(), @@ -1704,34 +1512,34 @@ fn test_storage_info() { max_size: Some(4), } }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] { StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"ConditionalMap".to_vec(), prefix: prefix(b"Example", b"ConditionalMap").to_vec(), max_values: Some(12), - max_size: Some(8 + 2 + 4), + max_size: Some(6 + 8), } }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] { StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"ConditionalDoubleMap".to_vec(), prefix: prefix(b"Example", b"ConditionalDoubleMap").to_vec(), max_values: None, - max_size: Some(16 + 1 + 8 + 2 + 4), + max_size: Some(7 + 16 + 8), } }, - #[cfg(feature = "frame-feature-testing")] + #[cfg(feature = "conditional-storage")] { StorageInfo 
{ pallet_name: b"Example".to_vec(), storage_name: b"ConditionalNMap".to_vec(), prefix: prefix(b"Example", b"ConditionalNMap").to_vec(), max_values: None, - max_size: Some(16 + 1 + 8 + 2 + 4), + max_size: Some(7 + 16 + 8), } }, StorageInfo { @@ -1739,7 +1547,7 @@ fn test_storage_info() { storage_name: b"RenamedCountedMap".to_vec(), prefix: prefix(b"Example", b"RenamedCountedMap").to_vec(), max_values: None, - max_size: Some(8 + 1 + 4), + max_size: Some(1 + 4 + 8), }, StorageInfo { pallet_name: b"Example".to_vec(), @@ -1789,24 +1597,8 @@ fn test_storage_info() { #[test] fn assert_type_all_pallets_reversed_with_system_first_is_correct() { // Just ensure the 2 types are same. - #[allow(deprecated)] fn _a(_t: AllPalletsReversedWithSystemFirst) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example4, Example3, Example2, Example)) { - _a(t) - } - - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example5, Example4, Example2, Example)) { - _a(t) - } - - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example5, Example4, Example3, Example2, Example)) { + fn _b(t: (System, (Example4, (Example2, (Example,))))) { _a(t) } } @@ -1815,20 +1607,7 @@ fn assert_type_all_pallets_reversed_with_system_first_is_correct() { fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. 
fn _a(_t: AllPalletsWithSystem) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example, Example2, Example4)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example, Example2, Example3, Example4)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example, Example2, Example4, Example5)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example, Example2, Example3, Example4, Example5)) { + fn _b(t: (System, (Example, (Example2, (Example4,))))) { _a(t) } } @@ -1837,20 +1616,7 @@ fn assert_type_all_pallets_with_system_is_correct() { fn assert_type_all_pallets_without_system_is_correct() { // Just ensure the 2 types are same. fn _a(_t: AllPalletsWithoutSystem) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example, Example2, Example4)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example, Example2, Example3, Example4)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example, Example2, Example4, Example5)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example, Example2, Example3, Example4, Example5)) { + fn _b(t: (Example, (Example2, (Example4,)))) { _a(t) } } @@ -1858,22 +1624,8 @@ fn assert_type_all_pallets_without_system_is_correct() { #[test] fn assert_type_all_pallets_with_system_reversed_is_correct() { // Just ensure the 2 types are same. 
- #[allow(deprecated)] fn _a(_t: AllPalletsWithSystemReversed) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example2, Example, System)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example3, Example2, Example, System)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example2, Example, System)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example3, Example2, Example, System)) { + fn _b(t: (Example4, (Example2, (Example, (System,))))) { _a(t) } } @@ -1881,22 +1633,8 @@ fn assert_type_all_pallets_with_system_reversed_is_correct() { #[test] fn assert_type_all_pallets_without_system_reversed_is_correct() { // Just ensure the 2 types are same. - #[allow(deprecated)] fn _a(_t: AllPalletsWithoutSystemReversed) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example3, Example2, Example)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example3, Example2, Example)) { + fn _b(t: (Example4, (Example2, (Example,)))) { _a(t) } } diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 398137d644ee4..9327f5b6a3304 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -40,10 
+40,10 @@ mod pallet_old { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + SomeAssociation; - type RuntimeEvent: From> + Into<::RuntimeEvent>; + type Event: From> + Into<::Event>; } decl_storage! { @@ -70,12 +70,12 @@ mod pallet_old { ); decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -85,7 +85,7 @@ mod pallet_old { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - Weight::from_ref_time(10) + 10 } fn on_finalize(_n: T::BlockNumber) { @@ -113,14 +113,14 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + SomeAssociation + scale_info::StaticTypeInfo; #[pallet::constant] type SomeConst: Get; - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::pallet] @@ -131,7 +131,7 @@ pub mod pallet { impl Hooks for Pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - Weight::from_ref_time(10) + 10 } fn on_finalize(_n: T::BlockNumber) { @@ -141,7 +141,7 @@ pub mod pallet { #[pallet::call] impl Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, @@ -226,16 +226,16 @@ pub mod pallet { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type 
Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); @@ -251,19 +251,19 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index e8b5fe9fa33d4..3de45df223674 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -29,8 +29,8 @@ mod pallet_old { pub trait Config: frame_system::Config { type SomeConst: Get; - type Balance: Parameter + codec::HasCompact + From + Into + Default; - type RuntimeEvent: From> + Into<::RuntimeEvent>; + type Balance: Parameter + codec::HasCompact + From + Into + Default; + type Event: From> + Into<::Event>; } decl_storage! { @@ -56,13 +56,13 @@ mod pallet_old { decl_module! 
{ pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: T::RuntimeOrigin + where origin: T::Origin { type Error = Error; fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -72,7 +72,7 @@ mod pallet_old { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - Weight::from_ref_time(10) + 10 } fn on_finalize(_n: T::BlockNumber) { @@ -99,14 +99,13 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + scale_info::StaticTypeInfo; #[pallet::constant] type SomeConst: Get; - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::pallet] @@ -117,7 +116,7 @@ pub mod pallet { impl, I: 'static> Hooks for Pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - Weight::from_ref_time(10) + 10 } fn on_finalize(_n: T::BlockNumber) { @@ -127,7 +126,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, @@ -210,16 +209,16 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU32<250>; type Version = (); 
type PalletInfo = PalletInfo; @@ -232,39 +231,39 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type SomeConst = ConstU64<10>; type Balance = u64; } pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 7e05e2ecf783b..360a73e5ea2a3 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -16,10 +16,11 @@ // limitations under the License. 
use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, UnfilteredDispatchable}, + dispatch::UnfilteredDispatchable, pallet_prelude::ValueQuery, storage::unhashed, traits::{ConstU32, GetCallName, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, }; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, @@ -30,7 +31,7 @@ use sp_runtime::{DispatchError, ModuleError}; #[frame_support::pallet] pub mod pallet { use codec::MaxEncodedLen; - use frame_support::{pallet_prelude::*, parameter_types, scale_info}; + use frame_support::{pallet_prelude::*, scale_info}; use frame_system::pallet_prelude::*; use sp_std::any::TypeId; @@ -41,8 +42,7 @@ pub mod pallet { #[pallet::constant] type MyGetParam: Get; type Balance: Parameter + Default + scale_info::StaticTypeInfo; - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::pallet] @@ -54,10 +54,10 @@ pub mod pallet { fn on_initialize(_: BlockNumberFor) -> Weight { if TypeId::of::() == TypeId::of::<()>() { Self::deposit_event(Event::Something(10)); - Weight::from_ref_time(10) + 10 } else { Self::deposit_event(Event::Something(11)); - Weight::from_ref_time(11) + 11 } } fn on_finalize(_: BlockNumberFor) { @@ -70,10 +70,10 @@ pub mod pallet { fn on_runtime_upgrade() -> Weight { if TypeId::of::() == TypeId::of::<()>() { Self::deposit_event(Event::Something(30)); - Weight::from_ref_time(30) + 30 } else { Self::deposit_event(Event::Something(31)); - Weight::from_ref_time(31) + 31 } } fn integrity_test() {} @@ -82,7 +82,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { /// Doc comment put in metadata - #[pallet::weight(Weight::from_ref_time(*_foo as u64))] + #[pallet::weight(Weight::from(*_foo))] pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, @@ -104,11 +104,9 @@ pub mod pallet { } #[pallet::error] - #[derive(PartialEq, Eq)] pub enum Error { /// doc comment put 
into metadata InsufficientProposersBalance, - NonExistentStorageValue, } #[pallet::event] @@ -130,20 +128,6 @@ pub mod pallet { #[pallet::storage] pub type Map2 = StorageMap<_, Twox64Concat, u16, u32>; - parameter_types! { - pub const Map3Default: Result> = Ok(1337); - } - - #[pallet::storage] - pub type Map3 = StorageMap< - _, - Blake2_128Concat, - u32, - u64, - ResultQuery::NonExistentStorageValue>, - Map3Default, - >; - #[pallet::storage] pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; @@ -152,17 +136,6 @@ pub mod pallet { pub type DoubleMap2 = StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>; - #[pallet::storage] - pub type DoubleMap3 = StorageDoubleMap< - _, - Blake2_128Concat, - u32, - Twox64Concat, - u64, - u128, - ResultQuery::NonExistentStorageValue>, - >; - #[pallet::storage] #[pallet::getter(fn nmap)] pub type NMap = StorageNMap<_, storage::Key, u32>; @@ -172,15 +145,6 @@ pub mod pallet { pub type NMap2 = StorageNMap<_, (storage::Key, storage::Key), u64>; - #[pallet::storage] - #[pallet::getter(fn nmap3)] - pub type NMap3 = StorageNMap< - _, - (NMapKey, NMapKey), - u128, - ResultQuery::NonExistentStorageValue>, - >; - #[pallet::genesis_config] #[derive(Default)] pub struct GenesisConfig { @@ -253,8 +217,7 @@ pub mod pallet2 { #[pallet::config] pub trait Config: frame_system::Config { - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; } #[pallet::pallet] @@ -286,16 +249,16 @@ pub mod pallet2 { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; 
+ type Event = Event; type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); @@ -311,25 +274,25 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type MyGetParam = ConstU32<10>; type Balance = u64; } impl pallet::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type MyGetParam = ConstU32<10>; type Balance = u64; } impl pallet2::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } impl pallet2::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where @@ -346,18 +309,12 @@ frame_support::construct_runtime!( } ); -use frame_support::weights::Weight; - #[test] fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_ref_time(3), - class: DispatchClass::Normal, - pays_fee: Pays::Yes - } + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!(pallet::Call::::get_call_names(), &["foo", "foo_storage_layer"]); @@ -365,11 +322,7 @@ fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { - weight: Weight::from_ref_time(3), - class: DispatchClass::Normal, - pays_fee: Pays::Yes - } + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -434,7 +387,7 @@ fn pallet_expand_deposit_event() { .unwrap(); assert_eq!( 
frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example(pallet::Event::Something(3)), + Event::Example(pallet::Event::Something(3)), ); }); @@ -445,7 +398,7 @@ fn pallet_expand_deposit_event() { .unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - RuntimeEvent::Instance1Example(pallet::Event::Something(3)), + Event::Instance1Example(pallet::Event::Something(3)), ); }); } @@ -483,13 +436,6 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); - >::insert(1, 2); - let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(2u64)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!(>::get(2), Ok(1337)); - >::insert(&1, &2, &3); let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -504,17 +450,6 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); - >::insert(&1, &2, &3); - let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap3")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - k.extend(2u64.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u128)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - >::get(2, 3), - Err(pallet::Error::::NonExistentStorageValue), - ); - >::insert((&1,), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -527,17 +462,6 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); - - >::insert((&1, &2), &3); - let mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); - k.extend(1u8.using_encoded(blake2_128_concat)); - k.extend(2u16.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u128)); - 
assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - >::get((2, 3)), - Err(pallet::Error::::NonExistentStorageValue), - ); }); TestExternalities::default().execute_with(|| { @@ -557,13 +481,6 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); - >::insert(1, 2); - let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map3")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(2u64)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!(>::get(2), Ok(1337)); - >::insert(&1, &2, &3); let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -578,17 +495,6 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); - >::insert(&1, &2, &3); - let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap3")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - k.extend(2u64.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u128)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - >::get(2, 3), - Err(pallet::Error::::NonExistentStorageValue), - ); - >::insert((&1,), &3); let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -601,17 +507,6 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); - - >::insert((&1, &2), &3); - let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap3")].concat(); - k.extend(1u8.using_encoded(blake2_128_concat)); - k.extend(2u16.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u128)); - assert_eq!(&k[..32], &>::final_prefix()); - assert_eq!( - >::get((2, 3)), - Err(pallet::Error::::NonExistentStorageValue), - ); }); } @@ -662,34 +557,34 @@ fn pallet_hooks_expand() 
{ TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllPalletsWithoutSystem::on_initialize(1), Weight::from_ref_time(21)); + assert_eq!(AllPalletsWithoutSystem::on_initialize(1), 21); AllPalletsWithoutSystem::on_finalize(1); - assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), Weight::from_ref_time(61)); + assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), 61); assert_eq!( frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example(pallet::Event::Something(10)), + Event::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - RuntimeEvent::Instance1Example(pallet::Event::Something(11)), + Event::Instance1Example(pallet::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - RuntimeEvent::Example(pallet::Event::Something(20)), + Event::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - RuntimeEvent::Instance1Example(pallet::Event::Something(21)), + Event::Instance1Example(pallet::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - RuntimeEvent::Example(pallet::Event::Something(30)), + Event::Example(pallet::Event::Something(30)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - RuntimeEvent::Instance1Example(pallet::Event::Something(31)), + Event::Instance1Example(pallet::Event::Something(31)), ); }) } @@ -793,17 +688,6 @@ fn metadata() { default: vec![0], docs: vec![], }, - StorageEntryMetadata { - name: "Map3", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: scale_info::meta_type::(), - value: scale_info::meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], - }, - default: vec![0, 57, 5, 0, 0, 0, 0, 0, 0], - docs: vec![], - }, StorageEntryMetadata { name: "DoubleMap", modifier: StorageEntryModifier::Optional, @@ -826,17 +710,6 @@ fn metadata() { default: vec![0], docs: vec![], }, - 
StorageEntryMetadata { - name: "DoubleMap3", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: scale_info::meta_type::(), - key: scale_info::meta_type::<(u32, u64)>(), - hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], - }, - default: vec![1, 1], - docs: vec![], - }, StorageEntryMetadata { name: "NMap", modifier: StorageEntryModifier::Optional, @@ -859,17 +732,6 @@ fn metadata() { default: vec![0], docs: vec![], }, - StorageEntryMetadata { - name: "NMap3", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: scale_info::meta_type::<(u8, u16)>(), - hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], - value: scale_info::meta_type::(), - }, - default: vec![1, 1], - docs: vec![], - }, ], }), calls: Some(scale_info::meta_type::>().into()), diff --git a/frame/support/test/tests/pallet_ui/attr_non_empty.stderr b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr index 9eac5de35db80..144af5a17ea5c 100644 --- a/frame/support/test/tests/pallet_ui/attr_non_empty.stderr +++ b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr @@ -1,5 +1,5 @@ -error: Invalid pallet macro call: unexpected attribute. Macro call must be bare, such as `#[frame_support::pallet]` or `#[pallet]`, or must specify the `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or #[pallet(dev_mode)]. - --> tests/pallet_ui/attr_non_empty.rs:1:26 +error: Invalid pallet macro call: expected no attributes, e.g. 
macro call must be just `#[frame_support::pallet]` or `#[pallet]` + --> $DIR/attr_non_empty.rs:1:26 | 1 | #[frame_support::pallet [foo]] | ^^^ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 62d8649f8af49..1d581ea7ed572 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -5,8 +5,8 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | ^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required for `&::Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index f486c071631ea..b1487776eac50 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -5,8 +5,8 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | ^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required for `&::Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for 
`&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 @@ -35,7 +35,7 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | - = note: required for `::Bar` to implement `Encode` + = note: required because of the requirements on the impl of `Encode` for `::Bar` error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:17:12 @@ -43,4 +43,4 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is 17 | #[pallet::call] | ^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` | - = note: required for `::Bar` to implement `Decode` + = note: required because of the requirements on the impl of `Decode` for `::Bar` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 6e51bf2dbf862..a0418760ba7e2 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -6,8 +6,8 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `Bar` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` - = note: required for `&Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` help: consider annotating `Bar` with `#[derive(Debug)]` | 17 | #[derive(Debug)] diff --git 
a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs deleted file mode 100644 index f044ae6d7878f..0000000000000 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs +++ /dev/null @@ -1,33 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - // The struct on which we build all of our Pallet logic. - #[pallet::pallet] - pub struct Pallet(_); - - // Your Pallet's configuration trait, representing custom external types and interfaces. - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::storage] - type MyStorage = StorageValue<_, Vec>; - - // Your Pallet's callable functions. - #[pallet::call] - impl Pallet { - pub fn my_call(_origin: OriginFor) -> DispatchResult { - Ok(()) - } - } - - // Your Pallet's internal functions. - impl Pallet {} -} - -fn main() {} diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr deleted file mode 100644 index fac7fd77df9ae..0000000000000 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: Invalid pallet::call, requires weight attribute i.e. 
`#[pallet::weight($expr)]` - --> tests/pallet_ui/dev_mode_without_arg.rs:24:7 - | -24 | pub fn my_call(_origin: OriginFor) -> DispatchResult { - | ^^ - -error[E0432]: unresolved import `pallet` - --> tests/pallet_ui/dev_mode_without_arg.rs:3:9 - | -3 | pub use pallet::*; - | ^^^^^^ help: a similar path exists: `test_pallet::pallet` diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs deleted file mode 100644 index f6efcc3fc3d72..0000000000000 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs +++ /dev/null @@ -1,34 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - // The struct on which we build all of our Pallet logic. - #[pallet::pallet] - pub struct Pallet(_); - - // Your Pallet's configuration trait, representing custom external types and interfaces. - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::storage] - type MyStorage = StorageValue<_, Vec>; - - // Your Pallet's callable functions. - #[pallet::call] - impl Pallet { - #[pallet::weight(0)] - pub fn my_call(_origin: OriginFor) -> DispatchResult { - Ok(()) - } - } - - // Your Pallet's internal functions. 
- impl Pallet {} -} - -fn main() {} diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr deleted file mode 100644 index a5ec31a9bb4e7..0000000000000 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ /dev/null @@ -1,17 +0,0 @@ -error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied - --> tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs:11:12 - | -11 | #[pallet::pallet] - | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Vec` - | - = help: the following other types implement trait `MaxEncodedLen`: - () - (TupleElement0, TupleElement1) - (TupleElement0, TupleElement1, TupleElement2) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) - (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.rs b/frame/support/test/tests/pallet_ui/event_field_not_member.rs index 2b45a971788fb..0ecde4c130878 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.rs +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.rs @@ -6,7 +6,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config { type Bar; - type RuntimeEvent: IsType<::RuntimeEvent> + From>; + type Event: IsType<::Event> + From>; } #[pallet::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr 
b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 1161f4a190231..92623e0329fe3 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -17,5 +17,5 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required for `&::Bar` to implement `std::fmt::Debug` - = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr index 2eda72eb5f72f..e3126ad6a85dc 100644 --- a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr @@ -1,4 +1,4 @@ -error: Invalid usage of RuntimeEvent, `Config` contains no associated type `RuntimeEvent`, but enum `Event` is declared (in use of `#[pallet::event]`). An RuntimeEvent associated type must be declare on trait `Config`. +error: Invalid usage of Event, `Config` contains no associated type `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). An Event associated type must be declare on trait `Config`. 
--> $DIR/event_not_in_trait.rs:1:1 | 1 | #[frame_support::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs index a02cc9b9de883..fa3bf04d3530d 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs @@ -6,7 +6,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config { type Bar; - type RuntimeEvent; + type Event; } #[pallet::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr index d54149d719a3b..1f58a37576d0d 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr @@ -1,5 +1,5 @@ -error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `IsType<::RuntimeEvent>` +error: Invalid `type Event`, associated type `Event` is reserved and must bound: `IsType<::Event>` --> $DIR/event_type_invalid_bound.rs:9:3 | -9 | type RuntimeEvent; +9 | type Event; | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs index 99df89d67278c..564a539b89f57 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs @@ -6,7 +6,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config { type Bar; - type RuntimeEvent: IsType<::RuntimeEvent>; + type Event: IsType<::Event>; } #[pallet::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr index ea8b2ff000ceb..8b8946f3b25eb 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr 
+++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr @@ -1,5 +1,5 @@ -error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `From` or `From>` or `From>` +error: Invalid `type Event`, associated type `Event` is reserved and must bound: `From` or `From>` or `From>` --> $DIR/event_type_invalid_bound_2.rs:9:3 | -9 | type RuntimeEvent: IsType<::RuntimeEvent>; +9 | type Event: IsType<::Event>; | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index ff52a094d6f8d..d1a89fbb850e9 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,13 +1,13 @@ error[E0107]: missing generics for trait `Hooks` - --> tests/pallet_ui/hooks_invalid_item.rs:12:18 + --> $DIR/hooks_invalid_item.rs:12:18 | 12 | impl Hooks for Pallet {} | ^^^^^ expected 1 generic argument | note: trait defined here, with 1 generic parameter: `BlockNumber` - --> $WORKSPACE/frame/support/src/traits/hooks.rs + --> $DIR/hooks.rs:214:11 | - | pub trait Hooks { +214 | pub trait Hooks { | ^^^^^ ----------- help: add missing generic argument | diff --git a/frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs b/frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs deleted file mode 100644 index 1fc42f6511cfa..0000000000000 --- a/frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[frame_support::pallet(foo)] -pub mod pallet {} - -fn main() {} diff --git a/frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr b/frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr deleted file mode 100644 index 234dc07f2ece3..0000000000000 --- a/frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Invalid pallet macro call: unexpected attribute. 
Macro call must be bare, such as `#[frame_support::pallet]` or `#[pallet]`, or must specify the `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or #[pallet(dev_mode)]. - --> tests/pallet_ui/pallet_invalid_arg.rs:1:25 - | -1 | #[frame_support::pallet(foo)] - | ^^^ diff --git a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs deleted file mode 100644 index 97e0d585d0ce9..0000000000000 --- a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] - -pub use pallet::*; - -#[frame_support::pallet(dev_mode)] -pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - // The struct on which we build all of our Pallet logic. - #[pallet::pallet] - pub struct Pallet(_); - - // Your Pallet's configuration trait, representing custom external types and interfaces. - #[pallet::config] - pub trait Config: frame_system::Config {} - - // The MEL requirement for bounded pallets is skipped by `dev_mode`. - #[pallet::storage] - type MyStorage = StorageValue<_, Vec>; - - // Your Pallet's callable functions. - #[pallet::call] - impl Pallet { - // No need to define a `weight` attribute here because of `dev_mode`. - pub fn my_call(_origin: OriginFor) -> DispatchResult { - Ok(()) - } - } - - // Your Pallet's internal functions. 
- impl Pallet {} -} - -fn main() {} diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 42ef5a34e4c30..45cdfad67b8ae 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -9,9 +9,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required for `Bar` to implement `Decode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:10:12 @@ -29,9 +29,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:10:12 @@ -49,10 +49,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required for `Bar` to implement `Encode` - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -69,9 +69,9 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others - = note: required for `Bar` to implement `StaticTypeInfo` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + and 158 others + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -84,9 
+84,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required for `Bar` to implement `Decode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -104,9 +104,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -124,7 +124,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required for `Bar` to implement `Encode` - = note: required for `Bar` to implement 
`FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 461d63ebb0d9c..d7441e8b18562 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -9,9 +9,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required for `Bar` to implement `Decode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:10:12 @@ -29,9 +29,9 @@ error[E0277]: the 
trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:10:12 @@ -49,10 +49,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required for `Bar` to implement `Encode` - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -69,9 +69,9 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 161 others - 
= note: required for `Bar` to implement `StaticTypeInfo` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + and 158 others + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -84,9 +84,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required for `Bar` to implement `Decode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -104,9 +104,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + = note: required because of the requirements on the 
impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -124,7 +124,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required for `Bar` to implement `Encode` - = note: required for `Bar` to implement `FullEncode` - = note: required for `Bar` to implement `FullCodec` - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index cce9fa70b3da5..c2ec8cf7f4d05 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, 
TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others - = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` + and 72 others + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 877485dda2084..dbbc426de2906 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others - = note: required for `Key` to implement `KeyGeneratorMaxEncodedLen` - = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` + and 72 others + = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index 80c6526bbf888..6313bd691f943 100644 --- a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ 
b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage` +error: expected one of: `getter`, `storage_prefix`, `unbounded` --> $DIR/storage_invalid_attribute.rs:16:12 | 16 | #[pallet::generate_store(pub trait Store)] diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr index 223e9cfa3e9f8..4fd59183282d0 100644 --- a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. +error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. 
--> $DIR/storage_not_storage_type.rs:19:16 | 19 | type Foo = u8; diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs deleted file mode 100644 index a051cc087db58..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs +++ /dev/null @@ -1,21 +0,0 @@ -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::error] - pub enum Error { - NonExistentValue, - } - - #[pallet::storage] - type Foo = StorageValue<_, u8, ResultQuery>; -} - -fn main() { -} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr deleted file mode 100644 index 98265462bbdfb..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr +++ /dev/null @@ -1,15 +0,0 @@ -error[E0107]: missing generics for enum `pallet::Error` - --> tests/pallet_ui/storage_result_query_missing_generics.rs:17:56 - | -17 | type Foo = StorageValue<_, u8, ResultQuery>; - | ^^^^^ expected 1 generic argument - | -note: enum defined here, with 1 generic parameter: `T` - --> tests/pallet_ui/storage_result_query_missing_generics.rs:12:11 - | -12 | pub enum Error { - | ^^^^^ - -help: add missing generic argument - | -17 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue>>; - | ~~~~~~~~ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs deleted file mode 100644 index 9e0da4b62128d..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs +++ /dev/null @@ -1,23 +0,0 @@ 
-#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::BlockNumberFor; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::error] - pub enum Error { - NonExistentValue, - SomeOtherError, - } - - #[pallet::storage] - type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue, SomeOtherError>>; -} - -fn main() { -} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr deleted file mode 100644 index 4be2a36eb89e1..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Invalid pallet::storage, unexpected number of generic arguments for ResultQuery, expected 1 type argument, found 2 - --> tests/pallet_ui/storage_result_query_multiple_type_args.rs:19:56 - | -19 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue, SomeOtherError>>; - | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs deleted file mode 100644 index 102a2261f8333..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs +++ /dev/null @@ -1,16 +0,0 @@ -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::storage] - type Foo = StorageValue<_, u8, ResultQuery>; -} - -fn main() { -} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr deleted file 
mode 100644 index 77a7972a5b5cf..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Invalid pallet::storage, unexpected number of path segments for the generics in ResultQuery, expected a path with at least 2 segments, found 1 - --> tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs:12:56 - | -12 | type Foo = StorageValue<_, u8, ResultQuery>; - | ^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs deleted file mode 100644 index f30dc3b6a3cc7..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs +++ /dev/null @@ -1,22 +0,0 @@ -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::BlockNumberFor; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::error] - pub enum Error { - NonExistentValue, - } - - #[pallet::storage] - type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; -} - -fn main() { -} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr deleted file mode 100644 index caffd846f272a..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Invalid pallet::storage, unexpected generic args for ResultQuery, expected angle-bracketed arguments, found `(NonExistentValue)` - --> tests/pallet_ui/storage_result_query_parenthesized_generics.rs:18:55 - | -18 | type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; - | ^^^^^^^^^^^^^^^^^^ diff --git 
a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs deleted file mode 100644 index a5065398b3970..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs +++ /dev/null @@ -1,22 +0,0 @@ -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::BlockNumberFor; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::error] - pub enum Error { - NonExistentValue, - } - - #[pallet::storage] - type Foo = StorageValue<_, u8, ResultQuery<'static>>; -} - -fn main() { -} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr deleted file mode 100644 index 9f333ae28e6aa..0000000000000 --- a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Invalid pallet::storage, unexpected generic argument kind, expected a type path to a `PalletError` enum variant, found `'static` - --> tests/pallet_ui/storage_result_query_wrong_generic_kind.rs:18:56 - | -18 | type Foo = StorageValue<_, u8, ResultQuery<'static>>; - | ^^^^^^^ diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index 0066420566fe8..7ed8454668327 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -18,7 +18,7 @@ pub trait Trait: frame_system::Config { type Balance: frame_support::dispatch::Parameter; /// The overarching event type. - type RuntimeEvent: From> + Into<::RuntimeEvent>; + type Event: From> + Into<::Event>; } frame_support::decl_storage! 
{ @@ -43,7 +43,7 @@ frame_support::decl_error!( ); frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; type Error = Error; const Foo: u32 = u32::MAX; @@ -54,7 +54,7 @@ frame_support::decl_module! { } fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { - frame_support::weights::Weight::zero() + 0 } } } @@ -108,7 +108,7 @@ mod tests { type TestHeader = sp_runtime::generic::Header; type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< ::AccountId, - ::RuntimeCall, + ::Call, (), SignedExtra, >; @@ -126,16 +126,16 @@ mod tests { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = sp_core::H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = TestHeader; - type RuntimeEvent = (); + type Event = (); type BlockHashCount = ConstU64<250>; type DbWeight = (); type BlockWeights = (); @@ -153,6 +153,6 @@ mod tests { impl pallet_test::Trait for Runtime { type Balance = u32; - type RuntimeEvent = (); + type Event = (); } } diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 6fbbb8ac67bd7..05ed60fe90196 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -59,7 +59,7 @@ pub mod decl_pallet { pub trait Config: frame_system::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn set_value(_origin, value: u32) { DeclValue::put(value); @@ -79,23 +79,23 @@ pub type BlockNumber = u64; pub type Index = u64; pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u32; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU32<250>; type Version = (); type PalletInfo = PalletInfo; @@ -255,12 +255,12 @@ fn storage_layer_commit_then_rollback() { fn storage_layer_in_pallet_call() { TestExternalities::default().execute_with(|| { use sp_runtime::traits::Dispatchable; - let call1 = RuntimeCall::MyPallet(pallet::Call::set_value { value: 2 }); - assert_ok!(call1.dispatch(RuntimeOrigin::signed(0))); + let call1 = Call::MyPallet(pallet::Call::set_value { value: 2 }); + assert_ok!(call1.dispatch(Origin::signed(0))); assert_eq!(Value::::get(), 2); - let call2 = RuntimeCall::MyPallet(pallet::Call::set_value { value: 1 }); - assert_noop!(call2.dispatch(RuntimeOrigin::signed(0)), Error::::Revert); + let call2 = Call::MyPallet(pallet::Call::set_value { value: 1 }); + assert_noop!(call2.dispatch(Origin::signed(0)), Error::::Revert); }); } @@ -270,16 +270,11 @@ fn storage_layer_in_decl_pallet_call() { use 
frame_support::StorageValue; use sp_runtime::traits::Dispatchable; - let call1 = RuntimeCall::DeclPallet(decl_pallet::Call::set_value { value: 2 }); - assert_ok!(call1.dispatch(RuntimeOrigin::signed(0))); + let call1 = Call::DeclPallet(decl_pallet::Call::set_value { value: 2 }); + assert_ok!(call1.dispatch(Origin::signed(0))); assert_eq!(decl_pallet::DeclValue::get(), 2); - let call2 = RuntimeCall::DeclPallet(decl_pallet::Call::set_value { value: 1 }); - assert_noop!(call2.dispatch(RuntimeOrigin::signed(0)), "Revert!"); - // Calling the function directly also works with storage layers. - assert_noop!( - decl_pallet::Module::::set_value(RuntimeOrigin::signed(1), 1), - "Revert!" - ); + let call2 = Call::DeclPallet(decl_pallet::Call::set_value { value: 1 }); + assert_noop!(call2.dispatch(Origin::signed(0)), "Revert!"); }); } diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index 6fedd75019e37..9e597969d6c89 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -31,7 +31,7 @@ use sp_std::result; pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] #[transactional] fn value_commits(_origin, v: u32) { @@ -57,7 +57,7 @@ frame_support::decl_storage! 
{ struct Runtime; impl frame_support_test::Config for Runtime { - type RuntimeOrigin = u32; + type Origin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index eff41242917a0..b30fd8d5ec561 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -22,28 +22,28 @@ use frame_support::{ }; pub trait Config: 'static + Eq + Clone { - type RuntimeOrigin: Into, Self::RuntimeOrigin>> + type Origin: Into, Self::Origin>> + From>; - type BaseCallFilter: frame_support::traits::Contains; + type BaseCallFilter: frame_support::traits::Contains; type BlockNumber: Decode + Encode + EncodeLike + Clone + Default + scale_info::TypeInfo; type Hash; type AccountId: Encode + EncodeLike + Decode + scale_info::TypeInfo; - type RuntimeCall; - type RuntimeEvent: From>; + type Call; + type Event: From>; type PalletInfo: frame_support::traits::PalletInfo; type DbWeight: Get; } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] fn noop(_origin) {} } } impl Module { - pub fn deposit_event(_event: impl Into) {} + pub fn deposit_event(_event: impl Into) {} } frame_support::decl_event!( diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index dd3a5d606bad5..3429c6546c7fd 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -23,7 +23,6 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-version = { version = "5.0.0", default-features = false, path = "../../primitives/version" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } [dev-dependencies] criterion = "0.3.3" @@ -43,7 +42,6 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-version/std", - "sp-weights/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 0881b81eaca7d..0bc34fcbc5be2 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -16,10 +16,7 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::{ - traits::{ConstU32, ConstU64}, - weights::Weight, -}; +use frame_support::traits::{ConstU32, ConstU64}; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -37,7 +34,7 @@ mod module { #[pallet::config] pub trait Config: frame_system::Config { - type RuntimeEvent: From + IsType<::RuntimeEvent>; + type Event: From + IsType<::Event>; } #[pallet::event] @@ -64,7 +61,7 @@ frame_support::construct_runtime!( frame_support::parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( - Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75), + 4 * 1024 * 1024, Perbill::from_percent(75), ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength::max_with_normal_ratio( @@ -76,16 +73,16 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = BlockLength; type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -99,7 +96,7 @@ impl frame_system::Config for Runtime { } impl module::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; } fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 9ec9ed2ae6d21..437d06a17c781 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -37,9 +37,3 @@ std = [ "sp-runtime/std", "sp-std/std", ] - -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", -] diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 0f7603fe1dd9f..367e6c73c4134 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -21,7 +21,7 @@ use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_support::{dispatch::DispatchClass, storage, traits::Get}; +use frame_support::{storage, traits::Get, weights::DispatchClass}; use 
frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; use sp_runtime::traits::Hash; @@ -64,7 +64,7 @@ benchmarks! { #[skip_meta] set_storage { - let i in 0 .. 1000; + let i in 1 .. 1000; // Set up i items to add let mut items = Vec::new(); @@ -72,69 +72,56 @@ benchmarks! { let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); items.push((hash.clone(), hash.clone())); } - - let items_to_verify = items.clone(); }: _(RawOrigin::Root, items) verify { - // Verify that they're actually in the storage. - for (item, _) in items_to_verify { - let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?; - assert_eq!(value, *item); - } + let last_hash = (i, i - 1).using_encoded(T::Hashing::hash); + let value = storage::unhashed::get_raw(last_hash.as_ref()).ok_or("No value stored")?; + assert_eq!(value, last_hash.as_ref().to_vec()); } #[skip_meta] kill_storage { - let i in 0 .. 1000; + let i in 1 .. 1000; // Add i items to storage - let mut items = Vec::with_capacity(i as usize); + let mut items = Vec::new(); for j in 0 .. i { let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); storage::unhashed::put_raw(&hash, &hash); items.push(hash); } - // Verify that they're actually in the storage. - for item in &items { - let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; - assert_eq!(value, *item); - } + // We will verify this value is removed + let last_hash = (i, i - 1).using_encoded(T::Hashing::hash); + let value = storage::unhashed::get_raw(last_hash.as_ref()).ok_or("No value stored")?; + assert_eq!(value, last_hash.as_ref().to_vec()); - let items_to_verify = items.clone(); }: _(RawOrigin::Root, items) verify { - // Verify that they're not in the storage anymore. - for item in items_to_verify { - assert!(storage::unhashed::get_raw(&item).is_none()); - } + assert_eq!(storage::unhashed::get_raw(last_hash.as_ref()), None); } #[skip_meta] kill_prefix { - let p in 0 .. 
1000; + let p in 1 .. 1000; let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); - let mut items = Vec::with_capacity(p as usize); // add p items that share a prefix for i in 0 .. p { let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); let key = [&prefix[..], &hash[..]].concat(); storage::unhashed::put_raw(&key, &key); - items.push(key); } - // Verify that they're actually in the storage. - for item in &items { - let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; - assert_eq!(value, *item); - } + // We will verify this value is removed + let last_hash = (p, p - 1).using_encoded(T::Hashing::hash).as_ref().to_vec(); + let last_key = [&prefix[..], &last_hash[..]].concat(); + let value = storage::unhashed::get_raw(&last_key).ok_or("No value stored")?; + assert_eq!(value, last_key); + }: _(RawOrigin::Root, prefix, p) verify { - // Verify that they're not in the storage anymore. - for item in items { - assert!(storage::unhashed::get_raw(&item).is_none()); - } + assert_eq!(storage::unhashed::get_raw(&last_key), None); } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index a7f28ca30fe87..08b043ae62741 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -43,16 +43,16 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; diff 
--git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index f5811f306cfe3..a0679b11487f6 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -54,7 +54,7 @@ impl CheckGenesis { impl SignedExtension for CheckGenesis { type AccountId = T::AccountId; - type Call = ::RuntimeCall; + type Call = ::Call; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckGenesis"; diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 635ab4ef1d9a9..5090093fe168f 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -56,7 +56,7 @@ impl sp_std::fmt::Debug for CheckMortality { impl SignedExtension for CheckMortality { type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = T::Call; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckMortality"; @@ -101,10 +101,7 @@ impl SignedExtension for CheckMortality { mod tests { use super::*; use crate::mock::{new_test_ext, System, Test, CALL}; - use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, Pays}, - weights::Weight, - }; + use frame_support::weights::{DispatchClass, DispatchInfo, Pays}; use sp_core::H256; #[test] @@ -129,11 +126,8 @@ mod tests { #[test] fn signed_ext_check_era_should_change_longevity() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { - weight: Weight::from_ref_time(100), - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; let len = 0_usize; let ext = ( crate::CheckWeight::::new(), diff --git a/frame/system/src/extensions/check_non_zero_sender.rs b/frame/system/src/extensions/check_non_zero_sender.rs index 036f70c2fdd48..9a6c4007b3779 100644 --- 
a/frame/system/src/extensions/check_non_zero_sender.rs +++ b/frame/system/src/extensions/check_non_zero_sender.rs @@ -17,7 +17,7 @@ use crate::Config; use codec::{Decode, Encode}; -use frame_support::dispatch::DispatchInfo; +use frame_support::weights::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, SignedExtension}, @@ -53,10 +53,10 @@ impl CheckNonZeroSender { impl SignedExtension for CheckNonZeroSender where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = T::Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "CheckNonZeroSender"; diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 1616a2d8a119e..476aa2fb7478c 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -17,7 +17,7 @@ use crate::Config; use codec::{Decode, Encode}; -use frame_support::dispatch::DispatchInfo; +use frame_support::weights::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, One, SignedExtension}, @@ -60,10 +60,10 @@ impl sp_std::fmt::Debug for CheckNonce { impl SignedExtension for CheckNonce where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = T::Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index ef5f40402692c..0280d31f657ae 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -54,7 +54,7 @@ impl CheckSpecVersion { impl SignedExtension for CheckSpecVersion { type AccountId = T::AccountId; - type Call = ::RuntimeCall; + type Call = ::Call; type AdditionalSigned 
= u32; type Pre = (); const IDENTIFIER: &'static str = "CheckSpecVersion"; diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index be0b8fe2354aa..b92d8978bde01 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -54,7 +54,7 @@ impl CheckTxVersion { impl SignedExtension for CheckTxVersion { type AccountId = T::AccountId; - type Call = ::RuntimeCall; + type Call = ::Call; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckTxVersion"; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 5c3b80f59bfa8..b59c36ecb53b5 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -18,8 +18,8 @@ use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Decode, Encode}; use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, traits::Get, + weights::{DispatchClass, DispatchInfo, PostDispatchInfo}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -27,7 +27,6 @@ use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, DispatchResult, }; -use sp_weights::Weight; /// Block resource (weight) limit check. /// @@ -41,17 +40,16 @@ pub struct CheckWeight(sp_std::marker::PhantomData); impl CheckWeight where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { /// Checks if the current extrinsic does not exceed the maximum weight a single extrinsic /// with given `DispatchClass` can have. 
fn check_extrinsic_weight( - info: &DispatchInfoOf, + info: &DispatchInfoOf, ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight.any_gt(max) => - Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if info.weight > max => Err(InvalidTransaction::ExhaustsResources.into()), _ => Ok(()), } } @@ -60,18 +58,18 @@ where /// /// Upon successes, it returns the new block weight as a `Result`. fn check_block_weight( - info: &DispatchInfoOf, + info: &DispatchInfoOf, ) -> Result { let maximum_weight = T::BlockWeights::get(); let all_weight = Pallet::::block_weight(); - calculate_consumed_weight::(maximum_weight, all_weight, info) + calculate_consumed_weight::(maximum_weight, all_weight, info) } /// Checks if the current extrinsic can fit into the block with respect to block length limits. /// /// Upon successes, it returns the new block length as a `Result`. fn check_block_length( - info: &DispatchInfoOf, + info: &DispatchInfoOf, len: usize, ) -> Result { let length_limit = T::BlockLength::get(); @@ -94,7 +92,7 @@ where /// /// It checks and notes the new weight and length. pub fn do_pre_dispatch( - info: &DispatchInfoOf, + info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { let next_len = Self::check_block_length(info, len)?; @@ -109,7 +107,7 @@ where /// Do the validate checks. This can be applied to both signed and unsigned. /// /// It only checks that the block weight and length limit will not exceed. - pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { + pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { // ignore the next length. If they return `Ok`, then it is below the limit. let _ = Self::check_block_length(info, len)?; // during validation we skip block limit check. 
Since the `validate_transaction` @@ -146,8 +144,7 @@ where // Check if we don't exceed per-class allowance match limit_per_class.max_total { - Some(max) if per_class.any_gt(max) => - return Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if per_class > max => return Err(InvalidTransaction::ExhaustsResources.into()), // There is no `max_total` limit (`None`), // or we are below the limit. _ => {}, @@ -155,10 +152,10 @@ where // In cases total block weight is exceeded, we need to fall back // to `reserved` pool if there is any. - if all_weight.total().any_gt(maximum_weight.max_block) { + if all_weight.total() > maximum_weight.max_block { match limit_per_class.reserved { // We are over the limit in reserved pool. - Some(reserved) if per_class.any_gt(reserved) => + Some(reserved) if per_class > reserved => return Err(InvalidTransaction::ExhaustsResources.into()), // There is either no limit in reserved pool (`None`), // or we are below the limit. @@ -171,10 +168,10 @@ where impl SignedExtension for CheckWeight where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = T::Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; @@ -241,7 +238,7 @@ where } let unspent = post_info.calc_unspent(info); - if unspent.any_gt(Weight::zero()) { + if unspent > 0 { crate::BlockWeight::::mutate(|current_weight| { current_weight.sub(unspent, info.class); }) @@ -270,7 +267,10 @@ mod tests { mock::{new_test_ext, System, Test, CALL}, AllExtrinsicsLen, BlockWeight, }; - use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; + use frame_support::{ + assert_err, assert_ok, + weights::{Pays, Weight}, + }; use sp_std::marker::PhantomData; fn block_weights() -> crate::limits::BlockWeights { @@ -297,7 +297,7 @@ mod tests { fn check(call: impl FnOnce(&DispatchInfo, usize)) { new_test_ext().execute_with(|| { let max = DispatchInfo { - 
weight: Weight::MAX, + weight: Weight::max_value(), class: DispatchClass::Mandatory, ..Default::default() }; @@ -309,8 +309,8 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); - assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); + assert_eq!(System::block_weight().total(), Weight::max_value()); + assert!(System::block_weight().total() > block_weight_limit()); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -321,8 +321,7 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + - Weight::from_ref_time(1), + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + 1, class: DispatchClass::Normal, ..Default::default() }; @@ -342,13 +341,13 @@ mod tests { .get(DispatchClass::Operational) .max_total .unwrap_or_else(|| weights.max_block); - let base_weight = weights.get(DispatchClass::Operational).base_extrinsic; + let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; let weight = operational_limit - base_weight; let okay = DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; let max = DispatchInfo { - weight: weight + Weight::from_ref_time(1), + weight: weight + 1, class: DispatchClass::Operational, ..Default::default() }; @@ -365,9 +364,9 @@ mod tests { #[test] fn register_extra_weight_unchecked_doesnt_care_about_limits() { new_test_ext().execute_with(|| { - System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); - assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); + System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); + 
assert_eq!(System::block_weight().total(), Weight::max_value()); + assert!(System::block_weight().total() > block_weight_limit()); }); } @@ -378,11 +377,10 @@ mod tests { // Max normal is 768 (75%) // 10 is taken for block execution weight // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) - // And Operational can be 246 to produce a full block (-10 for base) - let max_normal = - DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; + // And Operational can be 256 to produce a full block (-5 for base) + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(246), + weight: 251, class: DispatchClass::Operational, ..Default::default() }; @@ -390,10 +388,10 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); + assert_eq!(System::block_weight().total(), 768); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); - assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), block_weight_limit()); // Checking single extrinsic should not take current block weight into account. 
assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); @@ -403,10 +401,9 @@ mod tests { fn dispatch_order_does_not_effect_weight_logic() { new_test_ext().execute_with(|| { // We switch the order of `full_block_with_normal_and_operational` - let max_normal = - DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(246), + weight: 251, class: DispatchClass::Operational, ..Default::default() }; @@ -414,11 +411,11 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - // Extra 20 here from block execution + base extrinsic weight - assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); + // Extra 15 here from block execution + base extrinsic weight + assert_eq!(System::block_weight().total(), 266); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); - assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), block_weight_limit()); }); } @@ -426,14 +423,11 @@ mod tests { fn operational_works_on_full_block() { new_test_ext().execute_with(|| { // An on_initialize takes up the whole block! (Every time!) 
- System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Mandatory); - let dispatch_normal = DispatchInfo { - weight: Weight::from_ref_time(251), - class: DispatchClass::Normal, - ..Default::default() - }; + System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory); + let dispatch_normal = + DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; let dispatch_operational = DispatchInfo { - weight: Weight::from_ref_time(246), + weight: 251, class: DispatchClass::Operational, ..Default::default() }; @@ -459,9 +453,9 @@ mod tests { #[test] fn signed_ext_check_weight_works_operational_tx() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: Weight::from_ref_time(100), ..Default::default() }; + let normal = DispatchInfo { weight: 100, ..Default::default() }; let op = DispatchInfo { - weight: Weight::from_ref_time(100), + weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -495,7 +489,7 @@ mod tests { fn signed_ext_check_weight_block_size_works() { new_test_ext().execute_with(|| { let normal = DispatchInfo::default(); - let normal_limit = normal_weight_limit().ref_time() as usize; + let normal_limit = normal_weight_limit() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); @@ -511,11 +505,8 @@ mod tests { reset_check_weight(&normal, normal_limit + 1, true); // Operational ones don't have this limit. 
- let op = DispatchInfo { - weight: Weight::zero(), - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; + let op = + DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; reset_check_weight(&op, normal_limit, false); reset_check_weight(&op, normal_limit + 100, false); reset_check_weight(&op, 1024, false); @@ -527,14 +518,12 @@ mod tests { fn signed_ext_check_weight_works_normal_tx() { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: Weight::from_ref_time(100), ..Default::default() }; + let small = DispatchInfo { weight: 100, ..Default::default() }; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; - let big = DispatchInfo { - weight: normal_limit - base_extrinsic + Weight::from_ref_time(1), - ..Default::default() - }; + let big = + DispatchInfo { weight: normal_limit - base_extrinsic + 1, ..Default::default() }; let len = 0_usize; let reset_check_weight = |i, f, s| { @@ -549,9 +538,9 @@ mod tests { } }; - reset_check_weight(&small, false, Weight::zero()); - reset_check_weight(&medium, false, Weight::zero()); - reset_check_weight(&big, true, Weight::from_ref_time(1)); + reset_check_weight(&small, false, 0); + reset_check_weight(&medium, false, 0); + reset_check_weight(&big, true, 1); }) } @@ -559,26 +548,20 @@ mod tests { fn signed_ext_check_weight_refund_works() { new_test_ext().execute_with(|| { // This is half of the max block weight - let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() }; - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_ref_time(128)), - pays_fee: Default::default(), - }; + let info = DispatchInfo { weight: 512, ..Default::default() }; + let post_info = + PostDispatchInfo { actual_weight: Some(128), pays_fee: Default::default() }; let len = 0_usize; let base_extrinsic = 
block_weights().get(DispatchClass::Normal).base_extrinsic; // We allow 75% for normal transaction, so we put 25% - extrinsic base weight BlockWeight::::mutate(|current_weight| { - current_weight.set(Weight::zero(), DispatchClass::Mandatory); - current_weight - .set(Weight::from_ref_time(256) - base_extrinsic, DispatchClass::Normal); + current_weight.set(0, DispatchClass::Mandatory); + current_weight.set(256 - base_extrinsic, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!( - BlockWeight::::get().total(), - info.weight + Weight::from_ref_time(256) - ); + assert_eq!(BlockWeight::::get().total(), info.weight + 256); assert_ok!(CheckWeight::::post_dispatch( Some(pre), @@ -587,34 +570,27 @@ mod tests { len, &Ok(()) )); - assert_eq!( - BlockWeight::::get().total(), - post_info.actual_weight.unwrap() + Weight::from_ref_time(256) - ); + assert_eq!(BlockWeight::::get().total(), post_info.actual_weight.unwrap() + 256); }) } #[test] fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() }; - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_ref_time(700)), - pays_fee: Default::default(), - }; + let info = DispatchInfo { weight: 512, ..Default::default() }; + let post_info = + PostDispatchInfo { actual_weight: Some(700), pays_fee: Default::default() }; let len = 0_usize; BlockWeight::::mutate(|current_weight| { - current_weight.set(Weight::zero(), DispatchClass::Mandatory); - current_weight.set(Weight::from_ref_time(128), DispatchClass::Normal); + current_weight.set(0, DispatchClass::Mandatory); + current_weight.set(128, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( BlockWeight::::get().total(), - info.weight + - Weight::from_ref_time(128) + - 
block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert_ok!(CheckWeight::::post_dispatch( @@ -626,9 +602,7 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight + - Weight::from_ref_time(128) + - block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } @@ -637,7 +611,7 @@ mod tests { fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { let weights = block_weights(); - let free = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let free = DispatchInfo { weight: 0, ..Default::default() }; let len = 0_usize; // Initial weight from `weights.base_block` @@ -656,10 +630,9 @@ mod tests { // Max block is 1024 // Max normal is 768 (75%) // Max mandatory is unlimited - let max_normal = - DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; let mandatory = DispatchInfo { - weight: Weight::from_ref_time(1019), + weight: 1019, class: DispatchClass::Mandatory, ..Default::default() }; @@ -667,10 +640,10 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); + assert_eq!(System::block_weight().total(), 768); assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); - assert_eq!(System::block_weight().total(), Weight::from_ref_time(1024 + 768)); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), 1024 + 768); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); }); } @@ -679,45 +652,39 @@ mod tests { fn no_max_total_should_still_be_limited_by_max_block() { // given let maximum_weight = 
BlockWeights::builder() - .base_block(Weight::zero()) + .base_block(0) .for_class(DispatchClass::non_mandatory(), |w| { - w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_ref_time(20).set_proof_size(u64::MAX)); + w.base_extrinsic = 0; + w.max_total = Some(20); }) .for_class(DispatchClass::Mandatory, |w| { - w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_ref_time(5).set_proof_size(u64::MAX)); + w.base_extrinsic = 0; + w.reserved = Some(5); w.max_total = None; }) .build_or_panic(); let all_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_ref_time(10), - DispatchClass::Operational => Weight::from_ref_time(10), - DispatchClass::Mandatory => Weight::zero(), + DispatchClass::Normal => 10, + DispatchClass::Operational => 10, + DispatchClass::Mandatory => 0, }); - assert_eq!(maximum_weight.max_block, all_weight.total().set_proof_size(u64::MAX)); + assert_eq!(maximum_weight.max_block, all_weight.total()); // fits into reserved - let mandatory1 = DispatchInfo { - weight: Weight::from_ref_time(5), - class: DispatchClass::Mandatory, - ..Default::default() - }; + let mandatory1 = + DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; // does not fit into reserved and the block is full. 
- let mandatory2 = DispatchInfo { - weight: Weight::from_ref_time(6), - class: DispatchClass::Mandatory, - ..Default::default() - }; + let mandatory2 = + DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; // when - assert_ok!(calculate_consumed_weight::<::RuntimeCall>( + assert_ok!(calculate_consumed_weight::<::Call>( maximum_weight.clone(), all_weight.clone(), &mandatory1 )); assert_err!( - calculate_consumed_weight::<::RuntimeCall>( + calculate_consumed_weight::<::Call>( maximum_weight, all_weight, &mandatory2 diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 7577d0dc6b158..94605c2da59bd 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -84,20 +84,20 @@ use sp_version::RuntimeVersion; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_support::{ - dispatch::{ - extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, - DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, - }, + dispatch::{DispatchResult, DispatchResultWithPostInfo}, storage, traits::{ ConstU32, Contains, EnsureOrigin, Get, HandleLifetime, OnKilledAccount, OnNewAccount, OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, }, + weights::{ + extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, + PerDispatchClass, RuntimeDbWeight, Weight, + }, Parameter, }; use scale_info::TypeInfo; use sp_core::storage::well_known_keys; -use sp_weights::{RuntimeDbWeight, Weight}; #[cfg(feature = "std")] use frame_support::traits::GenesisBuild; @@ -205,7 +205,7 @@ pub mod pallet { pub trait Config: 'static + Eq + Clone { /// The basic call filter to use in Origin. All origins are built with this filter as base, /// except Root. - type BaseCallFilter: Contains; + type BaseCallFilter: Contains; /// Block & extrinsics weights: base values and limits. 
#[pallet::constant] @@ -215,17 +215,14 @@ pub mod pallet { #[pallet::constant] type BlockLength: Get; - /// The `RuntimeOrigin` type used by dispatchable calls. - type RuntimeOrigin: Into, Self::RuntimeOrigin>> + /// The `Origin` type used by dispatchable calls. + type Origin: Into, Self::Origin>> + From> + Clone - + OriginTrait; + + OriginTrait; - /// The aggregated `RuntimeCall` type. - type RuntimeCall: Parameter - + Dispatchable - + Debug - + From>; + /// The aggregated `Call` type. + type Call: Dispatchable + Debug; /// Account index (aka nonce) type. This stores the number of previous transactions /// associated with a sender account. @@ -236,8 +233,7 @@ pub mod pallet { + Default + MaybeDisplay + AtLeast32Bit - + Copy - + MaxEncodedLen; + + Copy; /// The block number type used by the runtime. type BlockNumber: Parameter @@ -296,11 +292,11 @@ pub mod pallet { type Header: Parameter + traits::Header; /// The aggregated event type of the runtime. - type RuntimeEvent: Parameter + type Event: Parameter + Member + From> + Debug - + IsType<::RuntimeEvent>; + + IsType<::Event>; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). #[pallet::constant] @@ -324,7 +320,7 @@ pub mod pallet { /// Data to be associated with an account (other than nonce/transaction counter, which this /// pallet does regardless). - type AccountData: Member + FullCodec + Clone + Default + TypeInfo + MaxEncodedLen; + type AccountData: Member + FullCodec + Clone + Default + TypeInfo; /// Handler for when a new account has just been created. type OnNewAccount: OnNewAccount; @@ -336,7 +332,7 @@ pub mod pallet { type SystemWeightInfo: WeightInfo; - /// The designated SS58 prefix of this chain. + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. 
Reason is /// that the runtime should know about the prefix in order to make use of it as @@ -359,6 +355,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] + #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::hooks] @@ -565,7 +562,6 @@ pub mod pallet { /// The current weight for the block. #[pallet::storage] - #[pallet::whitelist_storage] #[pallet::getter(fn block_weight)] pub(super) type BlockWeight = StorageValue<_, ConsumedWeight, ValueQuery>; @@ -582,13 +578,11 @@ pub mod pallet { /// Extrinsics data for the current block (maps an extrinsic's index to its data). #[pallet::storage] #[pallet::getter(fn extrinsic_data)] - #[pallet::unbounded] pub(super) type ExtrinsicData = StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; /// The current block number being processed. Set by `execute_block`. #[pallet::storage] - #[pallet::whitelist_storage] #[pallet::getter(fn block_number)] pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; @@ -599,7 +593,6 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] - #[pallet::unbounded] #[pallet::getter(fn digest)] pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; @@ -611,14 +604,11 @@ pub mod pallet { /// Events have a large in-memory size. Box the events to not go out-of-memory /// just in case someone still reads them from within the runtime. #[pallet::storage] - #[pallet::whitelist_storage] - #[pallet::unbounded] pub(super) type Events = - StorageValue<_, Vec>>, ValueQuery>; + StorageValue<_, Vec>>, ValueQuery>; /// The number of events in the `Events` list. 
#[pallet::storage] - #[pallet::whitelist_storage] #[pallet::getter(fn event_count)] pub(super) type EventCount = StorageValue<_, EventIndex, ValueQuery>; @@ -633,14 +623,12 @@ pub mod pallet { /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. #[pallet::storage] - #[pallet::unbounded] #[pallet::getter(fn event_topics)] pub(super) type EventTopics = StorageMap<_, Blake2_128Concat, T::Hash, Vec<(T::BlockNumber, EventIndex)>, ValueQuery>; /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. #[pallet::storage] - #[pallet::unbounded] pub type LastRuntimeUpgrade = StorageValue<_, LastRuntimeUpgradeInfo>; /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. @@ -654,7 +642,6 @@ pub mod pallet { /// The execution phase of the block. #[pallet::storage] - #[pallet::whitelist_storage] pub(super) type ExecutionPhase = StorageValue<_, Phase>; #[cfg_attr(feature = "std", derive(Default))] @@ -703,7 +690,7 @@ pub type Key = Vec; pub type KeyValue = (Vec, Vec); /// A phase of a block's execution. -#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] pub enum Phase { /// Applying an extrinsic. @@ -751,7 +738,7 @@ type EventIndex = u32; pub type RefCount = u32; /// Information of an account. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct AccountInfo { /// The number of transactions this account has sent. pub nonce: Index, @@ -1224,7 +1211,7 @@ impl Pallet { } /// Deposits an event into this block's event record. 
- pub fn deposit_event(event: impl Into) { + pub fn deposit_event(event: impl Into) { Self::deposit_event_indexed(&[], event.into()); } @@ -1233,7 +1220,7 @@ impl Pallet { /// /// This will update storage entries that correspond to the specified topics. /// It is expected that light-clients could subscribe to this topics. - pub fn deposit_event_indexed(topics: &[T::Hash], event: T::RuntimeEvent) { + pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { let block_number = Self::block_number(); // Don't populate events on genesis. if block_number.is_zero() { @@ -1317,10 +1304,9 @@ impl Pallet { pub fn finalize() -> T::Header { log::debug!( target: "runtime::system", - "[{:?}] {} extrinsics, length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight:\ - {} ({}%) op weight {} ({}%) / mandatory weight {} ({}%)", + "[{:?}] length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight: {} ({}%) \ + / op weight {} ({}%) / mandatory weight {} ({}%)", Self::block_number(), - Self::extrinsic_index().unwrap_or_default(), Self::all_extrinsics_len(), sp_runtime::Percent::from_rational( Self::all_extrinsics_len(), @@ -1336,18 +1322,18 @@ impl Pallet { ).deconstruct(), Self::block_weight().get(DispatchClass::Normal), sp_runtime::Percent::from_rational( - Self::block_weight().get(DispatchClass::Normal).ref_time(), - T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()).ref_time() + *Self::block_weight().get(DispatchClass::Normal), + T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()) ).deconstruct(), Self::block_weight().get(DispatchClass::Operational), sp_runtime::Percent::from_rational( - Self::block_weight().get(DispatchClass::Operational).ref_time(), - T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()).ref_time() + *Self::block_weight().get(DispatchClass::Operational), + 
T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()) ).deconstruct(), Self::block_weight().get(DispatchClass::Mandatory), sp_runtime::Percent::from_rational( - Self::block_weight().get(DispatchClass::Mandatory).ref_time(), - T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()).ref_time() + *Self::block_weight().get(DispatchClass::Mandatory), + T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()) ).deconstruct(), ); ExecutionPhase::::kill(); @@ -1423,7 +1409,7 @@ impl Pallet { /// impact on the PoV size of a block. Users should use alternative and well bounded storage /// items for any behavior like this. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn events() -> Vec> { + pub fn events() -> Vec> { // Dereferencing the events here is fine since we are not in the // memory-restricted runtime. Self::read_events_no_consensus().into_iter().map(|e| *e).collect() @@ -1433,7 +1419,7 @@ impl Pallet { /// /// Should only be called if you know what you are doing and outside of the runtime block /// execution else it can have a large impact on the PoV size of a block. - pub fn read_events_no_consensus() -> Vec>> { + pub fn read_events_no_consensus() -> Vec>> { Events::::get() } @@ -1478,13 +1464,13 @@ impl Pallet { /// Assert the given `event` exists. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn assert_has_event(event: T::RuntimeEvent) { + pub fn assert_has_event(event: T::Event) { assert!(Self::events().iter().any(|record| record.event == event)) } /// Assert the last event equal to the given `event`. 
#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn assert_last_event(event: T::RuntimeEvent) { + pub fn assert_last_event(event: T::Event) { assert_eq!(Self::events().last().expect("events expected").event, event); } @@ -1512,15 +1498,9 @@ impl Pallet { } /// To be called immediately after an extrinsic has been applied. - /// - /// Emits an `ExtrinsicSuccess` or `ExtrinsicFailed` event depending on the outcome. - /// The emitted event contains the post-dispatch corrected weight including - /// the base-weight for its dispatch class. pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { - info.weight = extract_actual_weight(r, &info) - .saturating_add(T::BlockWeights::get().get(info.class).base_extrinsic); + info.weight = extract_actual_weight(r, &info); info.pays_fee = extract_actual_pays_fee(r, &info); - Self::deposit_event(match r { Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, Err(err) => { @@ -1710,7 +1690,7 @@ pub mod pallet_prelude { pub use crate::{ensure_none, ensure_root, ensure_signed, ensure_signed_or_root}; /// Type alias for the `Origin` associated type of system config. - pub type OriginFor = ::RuntimeOrigin; + pub type OriginFor = ::Origin; /// Type alias for the `BlockNumber` associated type of system config. pub type BlockNumberFor = ::BlockNumber; diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index eb95b699eba32..6076414ba6bcb 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -25,12 +25,9 @@ //! `DispatchClass`. This module contains configuration object for both resources, //! which should be passed to `frame_system` configuration when runtime is being set up. 
-use frame_support::{ - dispatch::{DispatchClass, OneOrMany, PerDispatchClass}, - weights::{constants, Weight}, -}; +use frame_support::weights::{constants, DispatchClass, OneOrMany, PerDispatchClass, Weight}; use scale_info::TypeInfo; -use sp_runtime::{traits::Bounded, Perbill, RuntimeDebug}; +use sp_runtime::{Perbill, RuntimeDebug}; /// Block length limit configuration. #[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] @@ -207,10 +204,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults( - Weight::from_parts(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), - DEFAULT_NORMAL_RATIO, - ) + Self::with_sensible_defaults(1 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) } } @@ -235,18 +229,15 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.all_gt(self.base_block) && max_for_class.all_gt(base_for_class)) - || max_for_class == Weight::zero(), + (max_for_class > self.base_block && max_for_class > base_for_class) + || max_for_class == 0, &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. error_assert!( - weights - .max_extrinsic - .unwrap_or(Weight::zero()) - .all_lte(max_for_class.saturating_sub(base_for_class)), + weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -255,14 +246,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), + weights.max_extrinsic.unwrap_or_else(Weight::max_value) > 0, &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. 
Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.all_gt(base_for_class) || reserved == Weight::zero(), + reserved > base_for_class || reserved == 0, &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -271,7 +262,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), + self.max_block >= weights.max_total.unwrap_or(0), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -280,7 +271,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. error_assert!( - self.max_block.all_gt(base_for_class + self.base_block), + self.max_block > base_for_class + self.base_block, &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, @@ -303,9 +294,9 @@ impl BlockWeights { /// is not suitable for production deployments. 
pub fn simple_max(block_weight: Weight) -> Self { Self::builder() - .base_block(Weight::zero()) + .base_block(0) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = Weight::zero(); + weights.base_extrinsic = 0; }) .for_class(DispatchClass::non_mandatory(), |weights| { weights.max_total = block_weight.into(); @@ -342,10 +333,9 @@ impl BlockWeights { BlockWeightsBuilder { weights: BlockWeights { base_block: constants::BlockExecutionWeight::get(), - max_block: Weight::zero(), + max_block: 0, per_class: PerDispatchClass::new(|class| { - let initial = - if class == DispatchClass::Mandatory { None } else { Some(Weight::zero()) }; + let initial = if class == DispatchClass::Mandatory { None } else { Some(0) }; WeightsPerClass { base_extrinsic: constants::ExtrinsicBaseWeight::get(), max_extrinsic: None, @@ -408,7 +398,7 @@ impl BlockWeightsBuilder { // compute max block size. for class in DispatchClass::all() { weights.max_block = match weights.per_class.get(*class).max_total { - Some(max) => max.max(weights.max_block), + Some(max) if max > weights.max_block => max, _ => weights.max_block, }; } diff --git a/frame/system/src/migrations/mod.rs b/frame/system/src/migrations/mod.rs index 15746d7376ac5..f02af7a316fe1 100644 --- a/frame/system/src/migrations/mod.rs +++ b/frame/system/src/migrations/mod.rs @@ -81,7 +81,7 @@ pub fn migrate_from_single_u8_to_triple_ref_count() -> Wei ); >::put(true); >::put(true); - Weight::MAX + Weight::max_value() } /// Migrate from unique `u32` reference counting to triple `u32` reference counting. @@ -99,7 +99,7 @@ pub fn migrate_from_single_to_triple_ref_count() -> Weight translated ); >::put(true); - Weight::MAX + Weight::max_value() } /// Migrate from dual `u32` reference counting to triple `u32` reference counting. 
@@ -117,5 +117,5 @@ pub fn migrate_from_dual_to_triple_ref_count() -> Weight { translated ); >::put(true); - Weight::MAX + Weight::max_value() } diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index d31a1b08667e5..f3f542aa83a9a 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -26,6 +26,7 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; +use sp_std::cell::RefCell; type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic; type Block = mocking::MockBlock; @@ -41,7 +42,7 @@ frame_support::construct_runtime!( ); const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024).set_proof_size(u64::MAX); +const MAX_BLOCK_WEIGHT: Weight = 1024; parameter_types! { pub Version: RuntimeVersion = RuntimeVersion { @@ -59,15 +60,14 @@ parameter_types! { write: 100, }; pub RuntimeBlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(Weight::from_ref_time(10)) + .base_block(10) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = Weight::from_ref_time(5); + weights.base_extrinsic = 5; }) .for_class(DispatchClass::Normal, |weights| { weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); }) .for_class(DispatchClass::Operational, |weights| { - weights.base_extrinsic = Weight::from_ref_time(10); weights.max_total = Some(MAX_BLOCK_WEIGHT); weights.reserved = Some( MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT @@ -79,14 +79,14 @@ parameter_types! { limits::BlockLength::max_with_normal_ratio(1024, NORMAL_DISPATCH_RATIO); } -parameter_types! { - pub static Killed: Vec = vec![]; +thread_local! 
{ + pub static KILLED: RefCell> = RefCell::new(vec![]); } pub struct RecordKilled; impl OnKilledAccount for RecordKilled { fn on_killed_account(who: &u64) { - Killed::mutate(|r| r.push(*who)) + KILLED.with(|r| r.borrow_mut().push(*who)) } } @@ -94,8 +94,8 @@ impl Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -103,7 +103,7 @@ impl Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<10>; type DbWeight = DbWeight; type Version = Version; @@ -120,8 +120,8 @@ impl Config for Test { pub type SysEvent = frame_system::Event; /// A simple call, which one doesn't matter. -pub const CALL: &::RuntimeCall = - &RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +pub const CALL: &::Call = + &Call::System(frame_system::Call::set_heap_pages { pages: 0u64 }); /// Create new externalities for `System` module tests. pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/system/src/mocking.rs b/frame/system/src/mocking.rs index d8cfcb9baf268..ccb63f9bb236c 100644 --- a/frame/system/src/mocking.rs +++ b/frame/system/src/mocking.rs @@ -22,7 +22,7 @@ use sp_runtime::generic; /// An unchecked extrinsic type to be used in tests. 
pub type MockUncheckedExtrinsic = generic::UncheckedExtrinsic< ::AccountId, - ::RuntimeCall, + ::Call, Signature, Extra, >; diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 99a4c1541d30f..86440188a765c 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -617,7 +617,7 @@ pub trait SignedPayload: Encode { #[cfg(test)] mod tests { use super::*; - use crate::mock::{RuntimeCall, Test as TestRuntime, CALL}; + use crate::mock::{Call, Test as TestRuntime, CALL}; use codec::Decode; use sp_core::offchain::{testing, TransactionPoolExt}; use sp_runtime::testing::{TestSignature, TestXt, UintAuthorityId}; @@ -627,11 +627,11 @@ mod tests { type Signature = TestSignature; } - type Extrinsic = TestXt; + type Extrinsic = TestXt; - impl SendTransactionTypes for TestRuntime { + impl SendTransactionTypes for TestRuntime { type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type OverarchingCall = Call; } #[derive(codec::Encode, codec::Decode)] diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index c42131c450228..417dca12045ee 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -18,9 +18,10 @@ use crate::*; use frame_support::{ assert_noop, assert_ok, - dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, + dispatch::PostDispatchInfo, + weights::{Pays, WithPostDispatchInfo}, }; -use mock::{RuntimeOrigin, *}; +use mock::{Origin, *}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, Header}, @@ -29,8 +30,8 @@ use sp_runtime::{ #[test] fn origin_works() { - let o = RuntimeOrigin::from(RawOrigin::::Signed(1u64)); - let x: Result, RuntimeOrigin> = o.into(); + let o = Origin::from(RawOrigin::::Signed(1u64)); + let x: Result, Origin> = o.into(); assert_eq!(x.unwrap(), RawOrigin::::Signed(1u64)); } @@ -54,9 +55,9 @@ fn stored_map_works() { System::dec_consumers(&0); assert!(!System::is_provider_required(&0)); - assert!(Killed::get().is_empty()); + 
assert!(KILLED.with(|r| r.borrow().is_empty())); assert_ok!(System::remove(&0)); - assert_eq!(Killed::get(), vec![0u64]); + assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); }); } @@ -169,10 +170,6 @@ fn deposit_event_should_work() { }] ); - let normal_base = ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - System::reset_events(); System::initialize(&2, &[0u8; 32].into(), &Default::default()); System::deposit_event(SysEvent::NewAccount { account: 32 }); @@ -198,17 +195,14 @@ fn deposit_event_should_work() { }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } - } - .into(), + event: SysEvent::ExtrinsicSuccess { dispatch_info: Default::default() }.into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + dispatch_info: Default::default() } .into(), topics: vec![] @@ -230,10 +224,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::initialize(&1, &[0u8; 32].into(), &Default::default()); System::note_finished_initialize(); - let normal_base = ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - let pre_info = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + let pre_info = DispatchInfo { weight: 1000, ..Default::default() }; System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); System::note_applied_extrinsic( @@ -245,10 +236,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::note_applied_extrinsic(&Ok(Pays::No.into()), pre_info); System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::No).into()), pre_info); System::note_applied_extrinsic(&Ok((Some(500), Pays::No).into()), pre_info); - 
System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(999))), - pre_info, - ); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.with_weight(999)), pre_info); System::note_applied_extrinsic( &Err(DispatchErrorWithPostInfo { @@ -259,186 +247,144 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { ); System::note_applied_extrinsic( &Err(DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { - actual_weight: Some(Weight::from_ref_time(800)), - pays_fee: Pays::Yes, - }, + post_info: PostDispatchInfo { actual_weight: Some(800), pays_fee: Pays::Yes }, error: DispatchError::BadOrigin, }), pre_info, ); System::note_applied_extrinsic( &Err(DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { - actual_weight: Some(Weight::from_ref_time(800)), - pays_fee: Pays::No, - }, + post_info: PostDispatchInfo { actual_weight: Some(800), pays_fee: Pays::No }, error: DispatchError::BadOrigin, }), pre_info, ); - // Also works for operational. 
- let operational_base = ::BlockWeights::get() - .get(DispatchClass::Operational) - .base_extrinsic; - assert!(normal_base != operational_base, "Test pre-condition violated"); - let pre_info = DispatchInfo { - weight: Weight::from_ref_time(1000), - class: DispatchClass::Operational, - ..Default::default() - }; - System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); - let got = System::events(); - let want = vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(300).saturating_add(normal_base), - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000).saturating_add(normal_base), - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000).saturating_add(normal_base), - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(3), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000).saturating_add(normal_base), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(4), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000).saturating_add(normal_base), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(5), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000).saturating_add(normal_base), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - 
topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(6), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(500).saturating_add(normal_base), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(7), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(999).saturating_add(normal_base), - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(8), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000).saturating_add(normal_base), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(9), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800).saturating_add(normal_base), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(10), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800).saturating_add(normal_base), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(11), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(300).saturating_add(operational_base), - class: DispatchClass::Operational, - ..Default::default() - }, - } - .into(), - topics: vec![], - }, - ]; - for (i, event) in want.into_iter().enumerate() { - assert_eq!(got[i], event, "Event mismatch at index {}", i); - } + 
assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { weight: 300, ..Default::default() }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { weight: 1000, ..Default::default() }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { weight: 1000, ..Default::default() }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(4), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(5), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(6), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 500, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(7), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { weight: 999, ..Default::default() }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(8), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: 1000, + pays_fee: Pays::Yes, + 
..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(9), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: 800, + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(10), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: 800, + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![] + }, + ] + ); }); } @@ -672,16 +618,16 @@ fn ensure_signed_stuff_works() { } } - let signed_origin = RuntimeOrigin::signed(0u64); + let signed_origin = Origin::signed(0u64); assert_ok!(EnsureSigned::try_origin(signed_origin.clone())); assert_ok!(EnsureSignedBy::::try_origin(signed_origin)); #[cfg(feature = "runtime-benchmarks")] { - let successful_origin: RuntimeOrigin = EnsureSigned::successful_origin(); + let successful_origin: Origin = EnsureSigned::successful_origin(); assert_ok!(EnsureSigned::try_origin(successful_origin)); - let successful_origin: RuntimeOrigin = EnsureSignedBy::::successful_origin(); + let successful_origin: Origin = EnsureSignedBy::::successful_origin(); assert_ok!(EnsureSignedBy::::try_origin(successful_origin)); } } diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 696a6a09b8f80..19719032587ef 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for frame_system //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-06-02, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `ci3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet // --chain=dev @@ -35,7 +35,6 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/system/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -59,105 +58,89 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Minimum execution time: 3_951 nanoseconds. - Weight::from_ref_time(1_307_232 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(363 as u64).saturating_mul(b as u64)) + fn remark(_b: u32, ) -> Weight { + (1_000_000 as Weight) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { - // Minimum execution time: 14_880 nanoseconds. - Weight::from_ref_time(15_173_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_424 as u64).saturating_mul(b as u64)) + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - // Minimum execution time: 9_819 nanoseconds. 
- Weight::from_ref_time(10_513_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (5_367_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. + /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - // Minimum execution time: 4_038 nanoseconds. - Weight::from_ref_time(4_098_000 as u64) - // Standard Error: 710 - .saturating_add(Weight::from_ref_time(620_813 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 1_000 + .saturating_add((603_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. + /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - // Minimum execution time: 3_972 nanoseconds. - Weight::from_ref_time(4_082_000 as u64) - // Standard Error: 884 - .saturating_add(Weight::from_ref_time(536_923 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 1_000 + .saturating_add((513_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `p` is `[0, 1000]`. + /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - // Minimum execution time: 5_703 nanoseconds. 
- Weight::from_ref_time(5_763_000 as u64) - // Standard Error: 1_248 - .saturating_add(Weight::from_ref_time(1_126_062 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) + (0 as Weight) + // Standard Error: 1_000 + .saturating_add((1_026_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } } // For backwards compatibility and tests impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Minimum execution time: 3_951 nanoseconds. - Weight::from_ref_time(1_307_232 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(363 as u64).saturating_mul(b as u64)) + fn remark(_b: u32, ) -> Weight { + (1_000_000 as Weight) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { - // Minimum execution time: 14_880 nanoseconds. - Weight::from_ref_time(15_173_000 as u64) + (0 as Weight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_424 as u64).saturating_mul(b as u64)) + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - // Minimum execution time: 9_819 nanoseconds. - Weight::from_ref_time(10_513_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (5_367_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. + /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - // Minimum execution time: 4_038 nanoseconds. 
- Weight::from_ref_time(4_098_000 as u64) - // Standard Error: 710 - .saturating_add(Weight::from_ref_time(620_813 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 1_000 + .saturating_add((603_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[0, 1000]`. + /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - // Minimum execution time: 3_972 nanoseconds. - Weight::from_ref_time(4_082_000 as u64) - // Standard Error: 884 - .saturating_add(Weight::from_ref_time(536_923 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 1_000 + .saturating_add((513_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `p` is `[0, 1000]`. + /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - // Minimum execution time: 5_703 nanoseconds. 
- Weight::from_ref_time(5_763_000 as u64) - // Standard Error: 1_248 - .saturating_add(Weight::from_ref_time(1_126_062 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) + (0 as Weight) + // Standard Error: 1_000 + .saturating_add((1_026_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } } diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index ac495d84b2c1e..8967733f7c5c8 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -33,9 +33,8 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ - "sp-io?/std", "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 6a7f849d1329a..81ed67913c2e6 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -282,8 +282,6 @@ impl Pallet { #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] pub fn set_timestamp(now: T::Moment) { Now::::put(now); - DidUpdate::::put(true); - >::on_timestamp_set(now); } } diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 2208510f24fe5..9536414c54db6 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -19,6 +19,7 @@ use super::*; use crate as pallet_timestamp; +use sp_std::cell::RefCell; use frame_support::{ parameter_types, @@ -48,23 +49,23 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -77,14 +78,14 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } -parameter_types! { - pub static CapturedMoment: Option = None; +thread_local! { + pub static CAPTURED_MOMENT: RefCell> = RefCell::new(None); } pub struct MockOnTimestampSet; impl OnTimestampSet for MockOnTimestampSet { fn on_timestamp_set(moment: Moment) { - CapturedMoment::mutate(|x| *x = Some(moment)); + CAPTURED_MOMENT.with(|x| *x.borrow_mut() = Some(moment)); } } @@ -96,11 +97,11 @@ impl Config for Test { } pub(crate) fn clear_captured_moment() { - CapturedMoment::mutate(|x| *x = None); + CAPTURED_MOMENT.with(|x| *x.borrow_mut() = None); } pub(crate) fn get_captured_moment() -> Option { - CapturedMoment::get() + CAPTURED_MOMENT.with(|x| x.borrow().clone()) } pub(crate) fn new_test_ext() -> TestExternalities { diff --git a/frame/timestamp/src/tests.rs b/frame/timestamp/src/tests.rs index 6a76fbc4820e6..f52ba7849c951 100644 --- a/frame/timestamp/src/tests.rs +++ b/frame/timestamp/src/tests.rs @@ -23,8 +23,8 @@ use frame_support::assert_ok; #[test] fn timestamp_works() { new_test_ext().execute_with(|| { - crate::Now::::put(46); - assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); + 
Timestamp::set_timestamp(42); + assert_ok!(Timestamp::set(Origin::none(), 69)); assert_eq!(Timestamp::now(), 69); assert_eq!(Some(69), get_captured_moment()); }); @@ -35,7 +35,8 @@ fn timestamp_works() { fn double_timestamp_should_fail() { new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); - assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); + assert_ok!(Timestamp::set(Origin::none(), 69)); + let _ = Timestamp::set(Origin::none(), 70); }); } @@ -45,7 +46,7 @@ fn double_timestamp_should_fail() { )] fn block_period_minimum_enforced() { new_test_ext().execute_with(|| { - crate::Now::::put(44); - let _ = Timestamp::set(RuntimeOrigin::none(), 46); + Timestamp::set_timestamp(42); + let _ = Timestamp::set(Origin::none(), 46); }); } diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 52123920977da..6b4ebfa74dd87 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_timestamp //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/timestamp/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/timestamp/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,14 +54,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - // Minimum execution time: 11_331 nanoseconds. - Weight::from_ref_time(11_584_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (8_080_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_finalize() -> Weight { - // Minimum execution time: 5_280 nanoseconds. - Weight::from_ref_time(5_412_000 as u64) + (2_681_000 as Weight) } } @@ -73,13 +68,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - // Minimum execution time: 11_331 nanoseconds. - Weight::from_ref_time(11_584_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (8_080_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn on_finalize() -> Weight { - // Minimum execution time: 5_280 nanoseconds. 
- Weight::from_ref_time(5_412_000 as u64) + (2_681_000 as Weight) } } diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml index b00a684c1c83b..e2ca152148db6 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -33,7 +33,6 @@ sp-storage = { version = "6.0.0", path = "../../primitives/storage" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 312424e5799ec..33e455bd3b9fd 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -92,20 +92,18 @@ benchmarks_instance_pallet! { report_awesome { let r in 0 .. T::MaximumReasonLength::get(); let (caller, reason, awesome_person) = setup_awesome::(r); - let awesome_person_lookup = T::Lookup::unlookup(awesome_person); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, awesome_person_lookup) + }: _(RawOrigin::Signed(caller), reason, awesome_person) retract_tip { let r = T::MaximumReasonLength::get(); let (caller, reason, awesome_person) = setup_awesome::(r); - let awesome_person_lookup = T::Lookup::unlookup(awesome_person.clone()); TipsMod::::report_awesome( RawOrigin::Signed(caller.clone()).into(), reason.clone(), - awesome_person_lookup + awesome_person.clone() )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); @@ -119,21 +117,19 @@ benchmarks_instance_pallet! { let t in 1 .. T::Tippers::max_len() as u32; let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; - let beneficiary_lookup = T::Lookup::unlookup(beneficiary); // Whitelist caller account from further DB operations. 
let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, beneficiary_lookup, value) + }: _(RawOrigin::Signed(caller), reason, beneficiary, value) tip { let t in 1 .. T::Tippers::max_len() as u32; let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary_lookup, + beneficiary.clone(), value )?; let reason_hash = T::Hashing::hash(&reason[..]); @@ -154,12 +150,11 @@ benchmarks_instance_pallet! { // Set up a new tip proposal let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary_lookup, + beneficiary.clone(), value )?; @@ -184,20 +179,18 @@ benchmarks_instance_pallet! 
{ // Set up a new tip proposal let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary_lookup, + beneficiary.clone(), value )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); ensure!(Tips::::contains_key(hash), "tip does not exist"); - let reject_origin = T::RejectOrigin::successful_origin(); - }: _(reject_origin, hash) + }: _(RawOrigin::Root, hash) impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 9313a26e52e00..71af87b42b55b 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -61,7 +61,7 @@ pub mod migrations; pub mod weights; use sp_runtime::{ - traits::{AccountIdConversion, BadOrigin, Hash, StaticLookup, TrailingZeroInput, Zero}, + traits::{AccountIdConversion, BadOrigin, Hash, TrailingZeroInput, Zero}, Percent, RuntimeDebug, }; use sp_std::prelude::*; @@ -80,7 +80,6 @@ pub use weights::WeightInfo; pub type BalanceOf = pallet_treasury::BalanceOf; pub type NegativeImbalanceOf = pallet_treasury::NegativeImbalanceOf; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An open tipping "motion". Retains all details of a tip including information on the finder /// and the members who have voted. @@ -127,8 +126,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + pallet_treasury::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Maximum acceptable reason length. 
/// @@ -239,10 +237,9 @@ pub mod pallet { pub fn report_awesome( origin: OriginFor, reason: Vec, - who: AccountIdLookupOf, + who: T::AccountId, ) -> DispatchResult { let finder = ensure_signed(origin)?; - let who = T::Lookup::lookup(who)?; ensure!( reason.len() <= T::MaximumReasonLength::get() as usize, @@ -334,11 +331,10 @@ pub mod pallet { pub fn tip_new( origin: OriginFor, reason: Vec, - who: AccountIdLookupOf, + who: T::AccountId, #[pallet::compact] tip_value: BalanceOf, ) -> DispatchResult { let tipper = ensure_signed(origin)?; - let who = T::Lookup::lookup(who)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let reason_hash = T::Hashing::hash(&reason[..]); ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); diff --git a/frame/tips/src/migrations/v4.rs b/frame/tips/src/migrations/v4.rs index 5e10fa7dd2c6d..34f7a43ec12de 100644 --- a/frame/tips/src/migrations/v4.rs +++ b/frame/tips/src/migrations/v4.rs @@ -49,7 +49,7 @@ pub fn migrate::on_chain_storage_version(); @@ -84,7 +84,7 @@ pub fn migrate; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -92,23 +94,24 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } -parameter_types! { - static TenToFourteenTestValue: Vec = vec![10,11,12,13,14]; +thread_local! 
{ + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } pub struct TenToFourteen; impl SortedMembers for TenToFourteen { fn sorted_members() -> Vec { - TenToFourteenTestValue::get().clone() + TEN_TO_FOURTEEN.with(|v| v.borrow().clone()) } #[cfg(feature = "runtime-benchmarks")] fn add(new: &u128) { - TenToFourteenTestValue::mutate(|members| { + TEN_TO_FOURTEEN.with(|v| { + let mut members = v.borrow_mut(); members.push(*new); members.sort(); }) @@ -116,7 +119,7 @@ impl SortedMembers for TenToFourteen { } impl ContainsLengthBound for TenToFourteen { fn max_len() -> usize { - TenToFourteenTestValue::get().len() + TEN_TO_FOURTEEN.with(|v| v.borrow().len()) } fn min_len() -> usize { 0 @@ -133,7 +136,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -152,7 +155,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -176,7 +179,7 @@ impl Config for Test { type TipFindersFee = TipFindersFee; type TipReportDepositBase = ConstU64<1>; type DataDepositPerByte = ConstU64<1>; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); } @@ -187,7 +190,7 @@ impl Config for Test { type TipFindersFee = TipFindersFee; type TipReportDepositBase = ConstU64<1>; type DataDepositPerByte = ConstU64<1>; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type WeightInfo = (); } @@ -209,7 +212,7 @@ fn last_event() -> TipEvent { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| 
if let RuntimeEvent::Tips(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let Event::Tips(inner) = e { Some(inner) } else { None }) .last() .unwrap() } @@ -230,9 +233,9 @@ fn tip_hash() -> H256 { fn tip_new_cannot_be_used_twice() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); assert_noop!( - Tips::tip_new(RuntimeOrigin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Tips::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), Error::::AlreadyKnown ); }); @@ -242,23 +245,23 @@ fn tip_new_cannot_be_used_twice() { fn report_awesome_and_tip_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); // other reports don't count. 
assert_noop!( - Tips::report_awesome(RuntimeOrigin::signed(1), b"awesome.dot".to_vec(), 3), + Tips::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), Error::::AlreadyKnown ); let h = tip_hash(); - assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); - assert_noop!(Tips::tip(RuntimeOrigin::signed(9), h, 10), BadOrigin); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips::tip(Origin::signed(9), h.clone(), 10), BadOrigin); System::set_block_number(2); - assert_ok!(Tips::close_tip(RuntimeOrigin::signed(100), h.into())); + assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 102); assert_eq!(Balances::free_balance(3), 8); @@ -269,15 +272,15 @@ fn report_awesome_and_tip_works() { fn report_awesome_from_beneficiary_and_tip_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 0)); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); System::set_block_number(2); - assert_ok!(Tips::close_tip(RuntimeOrigin::signed(100), h.into())); 
+ assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 110); }); @@ -291,33 +294,30 @@ fn close_tip_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); let h = tip_hash(); assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_noop!(Tips::close_tip(RuntimeOrigin::signed(0), h.into()), Error::::StillOpen); + assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); assert_eq!(last_event(), TipEvent::TipClosing { tip_hash: h }); - assert_noop!(Tips::close_tip(RuntimeOrigin::signed(0), h.into()), Error::::Premature); + assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::Premature); System::set_block_number(2); - assert_noop!(Tips::close_tip(RuntimeOrigin::none(), h.into()), BadOrigin); - assert_ok!(Tips::close_tip(RuntimeOrigin::signed(0), h.into())); + assert_noop!(Tips::close_tip(Origin::none(), h.into()), BadOrigin); + assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); assert_eq!(last_event(), TipEvent::TipClosed { tip_hash: h, who: 3, payout: 10 }); - assert_noop!( - Tips::close_tip(RuntimeOrigin::signed(100), h.into()), - Error::::UnknownTip - ); + assert_noop!(Tips::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); }); } @@ -331,7 +331,7 @@ fn slash_tip_works() { assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 100); - 
assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); @@ -340,10 +340,10 @@ fn slash_tip_works() { assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); // can't remove from any origin - assert_noop!(Tips::slash_tip(RuntimeOrigin::signed(0), h), BadOrigin); + assert_noop!(Tips::slash_tip(Origin::signed(0), h.clone()), BadOrigin); // can remove from root. - assert_ok!(Tips::slash_tip(RuntimeOrigin::root(), h)); + assert_ok!(Tips::slash_tip(Origin::root(), h.clone())); assert_eq!(last_event(), TipEvent::TipSlashed { tip_hash: h, finder: 0, deposit: 12 }); // tipper slashed @@ -357,32 +357,26 @@ fn retract_tip_works() { new_test_ext().execute_with(|| { // with report awesome Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); - assert_noop!(Tips::retract_tip(RuntimeOrigin::signed(10), h), Error::::NotFinder); - assert_ok!(Tips::retract_tip(RuntimeOrigin::signed(0), h)); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(0), h.clone())); System::set_block_number(2); - assert_noop!( - Tips::close_tip(RuntimeOrigin::signed(0), h.into()), - Error::::UnknownTip - ); + assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), 
Error::::UnknownTip); // with tip new Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); let h = tip_hash(); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); - assert_noop!(Tips::retract_tip(RuntimeOrigin::signed(0), h), Error::::NotFinder); - assert_ok!(Tips::retract_tip(RuntimeOrigin::signed(10), h)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(10), h.clone())); System::set_block_number(2); - assert_noop!( - Tips::close_tip(RuntimeOrigin::signed(10), h.into()), - Error::::UnknownTip - ); + assert_noop!(Tips::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); }); } @@ -390,12 +384,12 @@ fn retract_tip_works() { fn tip_median_calculation_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); let h = tip_hash(); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 1000000)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000000)); System::set_block_number(2); - assert_ok!(Tips::close_tip(RuntimeOrigin::signed(0), h.into())); + assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); }); } @@ -404,17 +398,17 @@ fn tip_median_calculation_works() { fn tip_changing_works() { new_test_ext().execute_with(|| { 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); let h = tip_hash(); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10000)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10000)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(13), h, 0)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(14), h, 0)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 1000)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 100)); - assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10000)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10000)); + assert_ok!(Tips::tip(Origin::signed(13), h.clone(), 0)); + assert_ok!(Tips::tip(Origin::signed(14), h.clone(), 0)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 100)); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); System::set_block_number(2); - assert_ok!(Tips::close_tip(RuntimeOrigin::signed(0), h.into())); + assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); }); } @@ -591,24 +585,24 @@ fn report_awesome_and_tip_works_second_instance() { assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201); - assert_ok!(Tips1::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips1::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); // duplicate report in tips1 reports don't count. 
assert_noop!( - Tips1::report_awesome(RuntimeOrigin::signed(1), b"awesome.dot".to_vec(), 3), + Tips1::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), Error::::AlreadyKnown ); // but tips is separate - assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips1::tip(RuntimeOrigin::signed(10), h, 10)); - assert_ok!(Tips1::tip(RuntimeOrigin::signed(11), h, 10)); - assert_ok!(Tips1::tip(RuntimeOrigin::signed(12), h, 10)); - assert_noop!(Tips1::tip(RuntimeOrigin::signed(9), h, 10), BadOrigin); + assert_ok!(Tips1::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips1::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips1::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips1::tip(Origin::signed(9), h.clone(), 10), BadOrigin); System::set_block_number(2); - assert_ok!(Tips1::close_tip(RuntimeOrigin::signed(100), h.into())); + assert_ok!(Tips1::close_tip(Origin::signed(100), h.into())); // Treasury 1 unchanged assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); // Treasury 2 gave the funds diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 1aa3fd8fa2eb7..4979618473fd1 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_tips //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/tips/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/tips/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -60,72 +57,60 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) - /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { - // Minimum execution time: 35_458 nanoseconds. - Weight::from_ref_time(36_920_009 as u64) - // Standard Error: 252 - .saturating_add(Weight::from_ref_time(1_835 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (30_669_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - // Minimum execution time: 34_322 nanoseconds. - Weight::from_ref_time(35_292_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (28_768_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) - /// The range of component `r` is `[0, 300]`. 
- /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { - // Minimum execution time: 26_691 nanoseconds. - Weight::from_ref_time(27_313_497 as u64) - // Standard Error: 141 - .saturating_add(Weight::from_ref_time(818 as u64).saturating_mul(r as u64)) - // Standard Error: 3_352 - .saturating_add(Weight::from_ref_time(108_557 as u64).saturating_mul(t as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (20_385_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 3_000 + .saturating_add((166_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) - /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { - // Minimum execution time: 17_464 nanoseconds. - Weight::from_ref_time(17_621_090 as u64) - // Standard Error: 3_702 - .saturating_add(Weight::from_ref_time(269_919 as u64).saturating_mul(t as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (12_287_000 as Weight) + // Standard Error: 6_000 + .saturating_add((363_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) - /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { - // Minimum execution time: 52_221 nanoseconds. 
- Weight::from_ref_time(53_168_303 as u64) - // Standard Error: 6_591 - .saturating_add(Weight::from_ref_time(243_706 as u64).saturating_mul(t as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (45_656_000 as Weight) + // Standard Error: 14_000 + .saturating_add((276_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) - /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { - // Minimum execution time: 22_911 nanoseconds. - Weight::from_ref_time(23_750_488 as u64) - // Standard Error: 2_561 - .saturating_add(Weight::from_ref_time(12_282 as u64).saturating_mul(t as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (18_525_000 as Weight) + // Standard Error: 5_000 + .saturating_add((37_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } @@ -133,71 +118,59 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) - /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { - // Minimum execution time: 35_458 nanoseconds. 
- Weight::from_ref_time(36_920_009 as u64) - // Standard Error: 252 - .saturating_add(Weight::from_ref_time(1_835 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (30_669_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - // Minimum execution time: 34_322 nanoseconds. - Weight::from_ref_time(35_292_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (28_768_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) - /// The range of component `r` is `[0, 300]`. - /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { - // Minimum execution time: 26_691 nanoseconds. 
- Weight::from_ref_time(27_313_497 as u64) - // Standard Error: 141 - .saturating_add(Weight::from_ref_time(818 as u64).saturating_mul(r as u64)) - // Standard Error: 3_352 - .saturating_add(Weight::from_ref_time(108_557 as u64).saturating_mul(t as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (20_385_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 3_000 + .saturating_add((166_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) - /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { - // Minimum execution time: 17_464 nanoseconds. - Weight::from_ref_time(17_621_090 as u64) - // Standard Error: 3_702 - .saturating_add(Weight::from_ref_time(269_919 as u64).saturating_mul(t as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (12_287_000 as Weight) + // Standard Error: 6_000 + .saturating_add((363_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) - /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { - // Minimum execution time: 52_221 nanoseconds. 
- Weight::from_ref_time(53_168_303 as u64) - // Standard Error: 6_591 - .saturating_add(Weight::from_ref_time(243_706 as u64).saturating_mul(t as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (45_656_000 as Weight) + // Standard Error: 14_000 + .saturating_add((276_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) - /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { - // Minimum execution time: 22_911 nanoseconds. - Weight::from_ref_time(23_750_488 as u64) - // Standard Error: 2_561 - .saturating_add(Weight::from_ref_time(12_282 as u64).saturating_mul(t as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (18_525_000 as Weight) + // Standard Error: 5_000 + .saturating_add((37_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 9150f87c7175a..51aeeabe99db8 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -26,7 +26,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index 2c1247cfc557a..2d4da250212f2 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ 
b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -29,7 +29,7 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" serde = { version = "1.0.136", optional = true } [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" sp-storage = { version = "6.0.0", default-features = false, path = "../../../primitives/storage" } @@ -41,7 +41,6 @@ pallet-balances = { version = "4.0.0-dev", path = "../../balances" } [features] default = ["std"] std = [ - "scale-info/std", "serde", "codec/std", "sp-std/std", diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index e136da8b0bb75..08561375247ae 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -39,7 +39,7 @@ use sp_std::prelude::*; use codec::{Decode, Encode}; use frame_support::{ - dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + dispatch::DispatchResult, traits::{ tokens::{ fungibles::{Balanced, CreditOf, Inspect}, @@ -47,6 +47,7 @@ use frame_support::{ }, IsType, }, + weights::{DispatchInfo, PostDispatchInfo}, DefaultNoBound, }; use pallet_transaction_payment::OnChargeTransaction; @@ -113,7 +114,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + pallet_transaction_payment::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The fungibles instance used to pay for transactions in assets. type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. 
@@ -153,7 +154,7 @@ pub struct ChargeAssetTxPayment { impl ChargeAssetTxPayment where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, @@ -169,8 +170,8 @@ where fn withdraw_fee( &self, who: &T::AccountId, - call: &T::RuntimeCall, - info: &DispatchInfoOf, + call: &T::Call, + info: &DispatchInfoOf, len: usize, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); @@ -210,7 +211,7 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { impl SignedExtension for ChargeAssetTxPayment where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + From + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, @@ -218,7 +219,7 @@ where { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = T::Call; type AdditionalSigned = (); type Pre = ( // tip diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs index 80ff4e40dcffa..394696cc18929 100644 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -54,8 +54,8 @@ pub trait OnChargeAssetTransaction { /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - call: &T::RuntimeCall, - dispatch_info: &DispatchInfoOf, + call: &T::Call, + dispatch_info: &DispatchInfoOf, asset_id: Self::AssetId, fee: Self::Balance, tip: Self::Balance, @@ -68,8 +68,8 @@ pub trait OnChargeAssetTransaction { /// Note: The `fee` already includes the `tip`. 
fn correct_and_deposit_fee( who: &T::AccountId, - dispatch_info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, @@ -114,8 +114,8 @@ where /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - _call: &T::RuntimeCall, - _info: &DispatchInfoOf, + _call: &T::Call, + _info: &DispatchInfoOf, asset_id: Self::AssetId, fee: Self::Balance, _tip: Self::Balance, @@ -142,8 +142,8 @@ where /// Note: The `corrected_fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - _dispatch_info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, + _dispatch_info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, _tip: Self::Balance, paid: Self::LiquidityInfo, diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index cfed1c33c9b24..08b17a6bf459c 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -18,11 +18,10 @@ use crate as pallet_asset_tx_payment; use frame_support::{ assert_ok, - dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, pallet_prelude::*, parameter_types, - traits::{fungibles::Mutate, AsEnsureOriginWithArg, ConstU32, ConstU64, ConstU8, FindAuthor}, - weights::{Weight, WeightToFee as WeightToFeeT}, + traits::{fungibles::Mutate, ConstU32, ConstU64, ConstU8, FindAuthor}, + weights::{DispatchClass, DispatchInfo, PostDispatchInfo, Weight, WeightToFee as WeightToFeeT}, ConsensusEngineId, }; use frame_system as system; @@ -34,6 +33,7 @@ use sp_runtime::{ testing::Header, traits::{BlakeTwo256, ConvertInto, IdentityLookup, SaturatedConversion, StaticLookup}, }; +use std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = 
frame_system::mocking::MockBlock; @@ -55,23 +55,23 @@ frame_support::construct_runtime!( } ); -const CALL: &::RuntimeCall = - &RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); +const CALL: &::Call = + &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); -parameter_types! { - static ExtrinsicBaseWeight: Weight = Weight::zero(); +thread_local! { + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); } pub struct BlockWeights; impl Get for BlockWeights { fn get() -> frame_system::limits::BlockWeights { frame_system::limits::BlockWeights::builder() - .base_block(Weight::zero()) + .base_block(0) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); + weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); + weights.max_total = 1024.into(); }) .build_or_panic() } @@ -87,16 +87,16 @@ impl frame_system::Config for Runtime { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -115,7 +115,7 @@ parameter_types! 
{ impl pallet_balances::Config for Runtime { type Balance = Balance; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; @@ -129,8 +129,7 @@ impl WeightToFeeT for WeightToFee { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()) - .saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) + Self::Balance::saturated_from(*weight).saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) } } @@ -138,13 +137,13 @@ impl WeightToFeeT for TransactionByteFee { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()) + Self::Balance::saturated_from(*weight) .saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow())) } } impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; @@ -153,11 +152,10 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Balance = Balance; type AssetId = u32; type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; type AssetDeposit = ConstU64<2>; type AssetAccountDeposit = ConstU64<2>; @@ -200,7 +198,7 @@ impl HandleCredit for CreditToBlockAuthor { } impl Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Fungibles = Assets; type OnChargeAssetTransaction = FungiblesAdapter< pallet_assets::BalanceToAssetBalance, @@ -210,24 +208,19 @@ impl Config for Runtime { pub struct ExtBuilder { balance_factor: u64, - base_weight: Weight, + base_weight: u64, byte_fee: u64, weight_to_fee: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { - balance_factor: 1, - 
base_weight: Weight::from_ref_time(0), - byte_fee: 1, - weight_to_fee: 1, - } + Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } } } impl ExtBuilder { - pub fn base_weight(mut self, base_weight: Weight) -> Self { + pub fn base_weight(mut self, base_weight: u64) -> Self { self.base_weight = base_weight; self } @@ -236,7 +229,7 @@ impl ExtBuilder { self } fn set_constants(&self) { - ExtrinsicBaseWeight::mutate(|v| *v = self.base_weight); + EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight); TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); } @@ -290,19 +283,19 @@ fn transaction_payment_in_native_possible() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(Weight::from_ref_time(5)) + .base_weight(5) .build() .execute_with(|| { let len = 10; let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(5)), len) + .pre_dispatch(&1, CALL, &info_from_weight(5), len) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(5)), + &info_from_weight(5), &default_post_info(), len, &Ok(()) @@ -310,15 +303,15 @@ fn transaction_payment_in_native_possible() { assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(100)), - 
&post_info_from_weight(Weight::from_ref_time(50)), + &info_from_weight(100), + &post_info_from_weight(50), len, &Ok(()) )); @@ -332,14 +325,14 @@ fn transaction_payment_in_asset_possible() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -358,7 +351,7 @@ fn transaction_payment_in_asset_possible() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -368,7 +361,7 @@ fn transaction_payment_in_asset_possible() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(weight)), + &info_from_weight(weight), &default_post_info(), len, &Ok(()) @@ -385,14 +378,14 @@ fn transaction_payment_without_fee() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -411,7 +404,7 @@ fn transaction_payment_without_fee() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, 
CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -421,7 +414,7 @@ fn transaction_payment_without_fee() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(weight)), + &info_from_weight(weight), &post_info_from_pays(Pays::No), len, &Ok(()) @@ -438,14 +431,14 @@ fn asset_transaction_payment_with_tip_and_refund() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -466,15 +459,15 @@ fn asset_transaction_payment_with_tip_and_refund() { let fee_with_tip = (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(weight)), - &post_info_from_weight(Weight::from_ref_time(final_weight)), + &info_from_weight(weight), + &post_info_from_weight(final_weight), len, &Ok(()) )); @@ -490,14 +483,14 @@ fn payment_from_account_with_only_assets() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 2; 
assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -518,7 +511,7 @@ fn payment_from_account_with_only_assets() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset @@ -526,7 +519,7 @@ fn payment_from_account_with_only_assets() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(weight)), + &info_from_weight(weight), &default_post_info(), len, &Ok(()) @@ -541,7 +534,7 @@ fn payment_only_with_existing_sufficient_asset() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { let asset_id = 1; @@ -550,13 +543,13 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .is_err()); // create the non-sufficient asset let min_balance = 2; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ false, /* is_sufficient */ @@ -564,7 +557,7 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .is_err()); }); } 
@@ -574,14 +567,14 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 1; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -618,14 +611,14 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { assert_eq!(Assets::balance(asset_id, caller), balance); } let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(weight)), + &info_from_weight(weight), &default_post_info(), len, &Ok(()) @@ -639,14 +632,14 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 100; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -696,14 +689,14 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(Weight::from_ref_time(base_weight)) + .base_weight(base_weight) .build() .execute_with(|| { // create the asset let asset_id = 1; let min_balance = 100; assert_ok!(Assets::force_create( - RuntimeOrigin::root(), + Origin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -720,7 +713,7 
@@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let len = 1; ChargeAssetTxPayment::::pre_dispatch_unsigned( CALL, - &info_from_weight(Weight::from_ref_time(weight)), + &info_from_weight(weight), len, ) .unwrap(); @@ -731,7 +724,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( None, - &info_from_weight(Weight::from_ref_time(weight)), + &info_from_weight(weight), &post_info_from_pays(Pays::Yes), len, &Ok(()) diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 9dd42c12c8bbf..16c2cc55efefb 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -21,4 +21,3 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } -sp-weights = { version = "4.0.0", path = "../../../primitives/weights" } diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index c0b816684a2f3..5e1cb46753524 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -17,7 +17,6 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../transaction-payment" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../primitives/runtime" } -sp-weights = { version = "4.0.0", default-features = false, path = "../../../../primitives/weights" } [features] default = ["std"] @@ -26,5 +25,4 @@ std = [ 
"pallet-transaction-payment/std", "sp-api/std", "sp-runtime/std", - "sp-weights/std", ] diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 10fd2a9e61fc1..5a0c70138db24 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -25,26 +25,10 @@ use sp_runtime::traits::MaybeDisplay; pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; sp_api::decl_runtime_apis! { - #[api_version(2)] pub trait TransactionPaymentApi where Balance: Codec + MaybeDisplay, { - #[changed_in(2)] - fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } - - #[api_version(2)] - pub trait TransactionPaymentCallApi - where - Balance: Codec + MaybeDisplay, - Call: Codec, - { - /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. - fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo; - - /// Query fee details of a given encoded `Call`. 
- fn query_call_fee_details(call: Call, len: u32) -> FeeDetails; - } } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 19007d37963ec..75ec42321ef5e 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -21,12 +21,12 @@ use std::{convert::TryInto, sync::Arc}; use codec::{Codec, Decode}; use jsonrpsee::{ - core::{Error as JsonRpseeError, RpcResult}, + core::{async_trait, Error as JsonRpseeError, RpcResult}, proc_macros::rpc, types::error::{CallError, ErrorCode, ErrorObject}, }; use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_rpc::number::NumberOrHex; @@ -81,11 +81,10 @@ impl From for i32 { } } +#[async_trait] impl - TransactionPaymentApiServer< - ::Hash, - RuntimeDispatchInfo, - > for TransactionPayment + TransactionPaymentApiServer<::Hash, RuntimeDispatchInfo> + for TransactionPayment where Block: BlockT, C: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, @@ -96,7 +95,7 @@ where &self, encoded_xt: Bytes, at: Option, - ) -> RpcResult> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); @@ -109,41 +108,14 @@ where Some(format!("{:?}", e)), )) })?; - - fn map_err(error: impl ToString, desc: &'static str) -> CallError { + api.query_info(&at, uxt, encoded_len).map_err(|e| { CallError::Custom(ErrorObject::owned( Error::RuntimeError.into(), - desc, - Some(error.to_string()), + "Unable to query dispatch info.", + Some(e.to_string()), )) - } - - let api_version = api - .api_version::>(&at) - .map_err(|e| map_err(e, "Failed to get transaction payment runtime api version"))? 
- .ok_or_else(|| { - CallError::Custom(ErrorObject::owned( - Error::RuntimeError.into(), - "Transaction payment runtime api wasn't found in the runtime", - None::, - )) - })?; - - if api_version < 2 { - #[allow(deprecated)] - api.query_info_before_version_2(&at, uxt, encoded_len) - .map_err(|e| map_err(e, "Unable to query dispatch info.").into()) - } else { - let res = api - .query_info(&at, uxt, encoded_len) - .map_err(|e| map_err(e, "Unable to query dispatch info."))?; - - Ok(RuntimeDispatchInfo { - weight: sp_weights::OldWeight(res.weight.ref_time()), - class: res.class, - partial_fee: res.partial_fee, - }) - } + .into() + }) } fn query_fee_details( diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index ce747fa6bd85c..fe37acb214452 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -63,11 +63,11 @@ use sp_runtime::{ use sp_std::prelude::*; use frame_support::{ - dispatch::{ - DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo, - }, + dispatch::DispatchResult, traits::{EstimateCallFee, Get}, - weights::{Weight, WeightToFee}, + weights::{ + DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, WeightToFee, + }, }; mod payment; @@ -127,14 +127,12 @@ type BalanceOf = <::OnChargeTransaction as OnChargeTransaction -pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M, X)>); +pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); /// Something that can convert the current multiplier to the next one. pub trait MultiplierUpdate: Convert { - /// Minimum multiplier. Any outcome of the `convert` function should be at least this. + /// Minimum multiplier fn min() -> Multiplier; - /// Maximum multiplier. Any outcome of the `convert` function should be less or equal this. 
- fn max() -> Multiplier; /// Target block saturation level fn target() -> Perquintill; /// Variability factor @@ -145,9 +143,6 @@ impl MultiplierUpdate for () { fn min() -> Multiplier { Default::default() } - fn max() -> Multiplier { - ::max_value() - } fn target() -> Perquintill { Default::default() } @@ -156,20 +151,16 @@ impl MultiplierUpdate for () { } } -impl MultiplierUpdate for TargetedFeeAdjustment +impl MultiplierUpdate for TargetedFeeAdjustment where T: frame_system::Config, S: Get, V: Get, M: Get, - X: Get, { fn min() -> Multiplier { M::get() } - fn max() -> Multiplier { - X::get() - } fn target() -> Perquintill { S::get() } @@ -178,20 +169,18 @@ where } } -impl Convert for TargetedFeeAdjustment +impl Convert for TargetedFeeAdjustment where T: frame_system::Config, S: Get, V: Get, M: Get, - X: Get, { fn convert(previous: Multiplier) -> Multiplier { // Defensive only. The multiplier in storage should always be at most positive. Nonetheless // we recover here in case of errors, because any value below this would be stale and can // never change. 
let min_multiplier = M::get(); - let max_multiplier = X::get(); let previous = previous.max(min_multiplier); let weights = T::BlockWeights::get(); @@ -200,11 +189,7 @@ where weights.get(DispatchClass::Normal).max_total.unwrap_or(weights.max_block); let current_block_weight = >::block_weight(); let normal_block_weight = - current_block_weight.get(DispatchClass::Normal).min(normal_max_weight); - - // TODO: Handle all weight dimensions - let normal_max_weight = normal_max_weight.ref_time(); - let normal_block_weight = normal_block_weight.ref_time(); + *current_block_weight.get(DispatchClass::Normal).min(&normal_max_weight); let s = S::get(); let v = V::get(); @@ -228,42 +213,15 @@ where if positive { let excess = first_term.saturating_add(second_term).saturating_mul(previous); - previous.saturating_add(excess).clamp(min_multiplier, max_multiplier) + previous.saturating_add(excess).max(min_multiplier) } else { // Defensive-only: first_term > second_term. Safe subtraction. let negative = first_term.saturating_sub(second_term).saturating_mul(previous); - previous.saturating_sub(negative).clamp(min_multiplier, max_multiplier) + previous.saturating_sub(negative).max(min_multiplier) } } } -/// A struct to make the fee multiplier a constant -pub struct ConstFeeMultiplier>(sp_std::marker::PhantomData); - -impl> MultiplierUpdate for ConstFeeMultiplier { - fn min() -> Multiplier { - M::get() - } - fn max() -> Multiplier { - M::get() - } - fn target() -> Perquintill { - Default::default() - } - fn variability() -> Multiplier { - Default::default() - } -} - -impl Convert for ConstFeeMultiplier -where - M: Get, -{ - fn convert(_previous: Multiplier) -> Multiplier { - Self::min() - } -} - /// Storage releases of the pallet. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] enum Releases { @@ -279,10 +237,6 @@ impl Default for Releases { } } -/// Default value for NextFeeMultiplier. 
This is used in genesis and is also used in -/// NextFeeMultiplierOnEmpty() to provide a value when none exists in storage. -const MULTIPLIER_DEFAULT_VALUE: Multiplier = Multiplier::from_u32(1); - #[frame_support::pallet] pub mod pallet { use super::*; @@ -296,7 +250,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Handler for withdrawing, refunding and depositing the transaction fee. /// Transaction fees are withdrawn before the transaction is executed. @@ -342,7 +296,7 @@ pub mod pallet { #[pallet::type_value] pub fn NextFeeMultiplierOnEmpty() -> Multiplier { - MULTIPLIER_DEFAULT_VALUE + Multiplier::saturating_from_integer(1) } #[pallet::storage] @@ -354,14 +308,12 @@ pub mod pallet { pub(super) type StorageVersion = StorageValue<_, Releases, ValueQuery>; #[pallet::genesis_config] - pub struct GenesisConfig { - pub multiplier: Multiplier, - } + pub struct GenesisConfig; #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self { multiplier: MULTIPLIER_DEFAULT_VALUE } + Self } } @@ -369,7 +321,6 @@ pub mod pallet { impl GenesisBuild for GenesisConfig { fn build(&self) { StorageVersion::::put(Releases::V2); - NextFeeMultiplier::::put(self.multiplier); } } @@ -396,7 +347,7 @@ pub mod pallet { assert!( ::max_value() >= Multiplier::checked_from_integer::( - T::BlockWeights::get().max_block.ref_time().try_into().unwrap() + T::BlockWeights::get().max_block.try_into().unwrap() ) .unwrap(), ); @@ -408,9 +359,8 @@ pub mod pallet { ); // add 1 percent; let addition = target / 100; - if addition == Weight::zero() { - // this is most likely because in a test setup we set everything to () - // or to `ConstFeeMultiplier`. + if addition == 0 { + // this is most likely because in a test setup we set everything to (). 
return } @@ -454,7 +404,7 @@ where len: u32, ) -> RuntimeDispatchInfo> where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some // hassle for sure. We have to make it aware of the index of `ChargeTransactionPayment` in @@ -481,7 +431,7 @@ where len: u32, ) -> FeeDetails> where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); @@ -495,40 +445,10 @@ where } } - /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. - pub fn query_call_info(call: T::RuntimeCall, len: u32) -> RuntimeDispatchInfo> - where - T::RuntimeCall: Dispatchable + GetDispatchInfo, - { - let dispatch_info = ::get_dispatch_info(&call); - let DispatchInfo { weight, class, .. } = dispatch_info; - - RuntimeDispatchInfo { - weight, - class, - partial_fee: Self::compute_fee(len, &dispatch_info, 0u32.into()), - } - } - - /// Query fee details of a given encoded `Call`. - pub fn query_call_fee_details(call: T::RuntimeCall, len: u32) -> FeeDetails> - where - T::RuntimeCall: Dispatchable + GetDispatchInfo, - { - let dispatch_info = ::get_dispatch_info(&call); - let tip = 0u32.into(); - - Self::compute_fee_details(len, &dispatch_info, tip) - } - /// Compute the final fee value for a particular transaction. - pub fn compute_fee( - len: u32, - info: &DispatchInfoOf, - tip: BalanceOf, - ) -> BalanceOf + pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { Self::compute_fee_details(len, info, tip).final_fee() } @@ -536,11 +456,11 @@ where /// Compute the fee details for a particular transaction. 
pub fn compute_fee_details( len: u32, - info: &DispatchInfoOf, + info: &DispatchInfoOf, tip: BalanceOf, ) -> FeeDetails> where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) } @@ -551,12 +471,12 @@ where /// weight is used for the weight fee calculation. pub fn compute_actual_fee( len: u32, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, tip: BalanceOf, ) -> BalanceOf where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { Self::compute_actual_fee_details(len, info, post_info, tip).final_fee() } @@ -564,12 +484,12 @@ where /// Compute the actual post dispatch fee details for a particular transaction. pub fn compute_actual_fee_details( len: u32, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, tip: BalanceOf, ) -> FeeDetails> where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { Self::compute_fee_raw( len, @@ -608,7 +528,7 @@ where } fn length_to_fee(length: u32) -> BalanceOf { - T::LengthToFee::weight_to_fee(&Weight::from_ref_time(length as u64)) + T::LengthToFee::weight_to_fee(&(length as Weight)) } fn weight_to_fee(weight: Weight) -> BalanceOf { @@ -650,7 +570,7 @@ pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); impl ChargeTransactionPayment where - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { /// utility constructor. Used only in client/factory code. @@ -658,7 +578,7 @@ where Self(fee) } - /// Returns the tip as being chosen by the transaction sender. + /// Returns the tip as being choosen by the transaction sender. 
pub fn tip(&self) -> BalanceOf { self.0 } @@ -666,8 +586,8 @@ where fn withdraw_fee( &self, who: &T::AccountId, - call: &T::RuntimeCall, - info: &DispatchInfoOf, + call: &T::Call, + info: &DispatchInfoOf, len: usize, ) -> Result< ( @@ -699,7 +619,7 @@ where /// state of-the-art blockchains, number of per-block transactions is expected to be in a /// range reasonable enough to not saturate the `Balance` type while multiplying by the tip. pub fn get_priority( - info: &DispatchInfoOf, + info: &DispatchInfoOf, len: usize, tip: BalanceOf, final_fee: BalanceOf, @@ -709,12 +629,8 @@ where let max_block_weight = T::BlockWeights::get().max_block; let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; - // TODO: Take into account all dimensions of weight - let max_block_weight = max_block_weight.ref_time(); - let info_weight = info.weight.ref_time(); - - let bounded_weight = info_weight.clamp(1, max_block_weight); - let bounded_length = (len as u64).clamp(1, max_block_length); + let bounded_weight = info.weight.max(1).min(max_block_weight); + let bounded_length = (len as u64).max(1).min(max_block_length); let max_tx_per_block_weight = max_block_weight / bounded_weight; let max_tx_per_block_length = max_block_length / bounded_length; @@ -773,11 +689,11 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { impl SignedExtension for ChargeTransactionPayment where BalanceOf: Send + Sync + From + FixedPointOperand, - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = T::Call; type AdditionalSigned = (); type Pre = ( // tip @@ -839,7 +755,7 @@ impl EstimateCallFee where BalanceOf: FixedPointOperand, - T::RuntimeCall: Dispatchable, + T::Call: Dispatchable, { fn estimate_call_fee(call: &AnyCall, post_info: PostDispatchInfo) -> BalanceOf { let len = call.encoded_size() as u32; @@ -853,6 +769,8 @@ mod tests { use super::*; 
use crate as pallet_transaction_payment; + use std::cell::RefCell; + use codec::Encode; use sp_core::H256; @@ -863,11 +781,12 @@ mod tests { }; use frame_support::{ - assert_noop, assert_ok, - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, - parameter_types, - traits::{ConstU32, ConstU64, Currency, GenesisBuild, Imbalance, OnUnbalanced}, - weights::{Weight, WeightToFee as WeightToFeeT}, + assert_noop, assert_ok, parameter_types, + traits::{ConstU32, ConstU64, Currency, Imbalance, OnUnbalanced}, + weights::{ + DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight, + WeightToFee as WeightToFeeT, + }, }; use frame_system as system; use pallet_balances::Call as BalancesCall; @@ -887,23 +806,23 @@ mod tests { } ); - const CALL: &::RuntimeCall = - &RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + const CALL: &::Call = + &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); - parameter_types! { - static ExtrinsicBaseWeight: Weight = Weight::zero(); + thread_local! 
{ + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); } pub struct BlockWeights; impl Get for BlockWeights { fn get() -> frame_system::limits::BlockWeights { frame_system::limits::BlockWeights::builder() - .base_block(Weight::zero()) + .base_block(0) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); + weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); + weights.max_total = 1024.into(); }) .build_or_panic() } @@ -920,16 +839,16 @@ mod tests { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -944,7 +863,7 @@ mod tests { impl pallet_balances::Config for Runtime { type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -958,7 +877,7 @@ mod tests { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()) + Self::Balance::saturated_from(*weight) .saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) } } @@ -967,14 +886,14 @@ mod tests { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()) + Self::Balance::saturated_from(*weight) .saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow())) } } - parameter_types! 
{ - static TipUnbalancedAmount: u64 = 0; - static FeeUnbalancedAmount: u64 = 0; + thread_local! { + static TIP_UNBALANCED_AMOUNT: RefCell = RefCell::new(0); + static FEE_UNBALANCED_AMOUNT: RefCell = RefCell::new(0); } pub struct DealWithFees; @@ -983,16 +902,16 @@ mod tests { mut fees_then_tips: impl Iterator>, ) { if let Some(fees) = fees_then_tips.next() { - FeeUnbalancedAmount::mutate(|a| *a += fees.peek()); + FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += fees.peek()); if let Some(tips) = fees_then_tips.next() { - TipUnbalancedAmount::mutate(|a| *a += tips.peek()); + TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += tips.peek()); } } } } impl Config for Runtime { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; @@ -1002,26 +921,19 @@ mod tests { pub struct ExtBuilder { balance_factor: u64, - base_weight: Weight, + base_weight: u64, byte_fee: u64, weight_to_fee: u64, - initial_multiplier: Option, } impl Default for ExtBuilder { fn default() -> Self { - Self { - balance_factor: 1, - base_weight: Weight::zero(), - byte_fee: 1, - weight_to_fee: 1, - initial_multiplier: None, - } + Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } } } impl ExtBuilder { - pub fn base_weight(mut self, base_weight: Weight) -> Self { + pub fn base_weight(mut self, base_weight: u64) -> Self { self.base_weight = base_weight; self } @@ -1037,12 +949,8 @@ mod tests { self.balance_factor = factor; self } - pub fn with_initial_multiplier(mut self, multiplier: Multiplier) -> Self { - self.initial_multiplier = Some(multiplier); - self - } fn set_constants(&self) { - ExtrinsicBaseWeight::mutate(|v| *v = self.base_weight); + EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight); TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); } @@ -1065,12 
+973,6 @@ mod tests { } .assimilate_storage(&mut t) .unwrap(); - - if let Some(multiplier) = self.initial_multiplier { - let genesis = pallet::GenesisConfig { multiplier }; - GenesisBuild::::assimilate_storage(&genesis, &mut t).unwrap(); - } - t.into() } } @@ -1097,43 +999,43 @@ mod tests { fn signed_extension_transaction_payment_work() { ExtBuilder::default() .balance_factor(10) - .base_weight(Weight::from_ref_time(5)) + .base_weight(5) .build() .execute_with(|| { let len = 10; let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(5)), len) + .pre_dispatch(&1, CALL, &info_from_weight(5), len) .unwrap(); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(5)), + &info_from_weight(5), &default_post_info(), len, &Ok(()) )); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10); - assert_eq!(TipUnbalancedAmount::get(), 0); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 5 + 10); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 0); - FeeUnbalancedAmount::mutate(|a| *a = 0); + FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(100)), - &post_info_from_weight(Weight::from_ref_time(50)), + &info_from_weight(100), + &post_info_from_weight(50), len, &Ok(()) )); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50); - assert_eq!(TipUnbalancedAmount::get(), 5); + 
assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 10 + 50); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5); }); } @@ -1141,22 +1043,22 @@ mod tests { fn signed_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) - .base_weight(Weight::from_ref_time(5)) + .base_weight(5) .build() .execute_with(|| { let len = 10; >::put(Multiplier::saturating_from_rational(3, 2)); let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) .unwrap(); // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(100)), - &post_info_from_weight(Weight::from_ref_time(50)), + &info_from_weight(100), + &post_info_from_weight(50), len, &Ok(()) )); @@ -1172,14 +1074,13 @@ mod tests { assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( &1, CALL, - &info_from_weight(Weight::MAX), + &info_from_weight(Weight::max_value()), 10 )); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), - (10000 - - ::BlockWeights::get().max_block.ref_time()) as u64 + (10000 - ::BlockWeights::get().max_block) as u64 ); }); } @@ -1187,7 +1088,7 @@ mod tests { #[test] fn signed_extension_allows_free_transactions() { ExtBuilder::default() - .base_weight(Weight::from_ref_time(100)) + .base_weight(100) .balance_factor(0) .build() .execute_with(|| { @@ -1198,7 +1099,7 @@ mod tests { // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
let operational_transaction = DispatchInfo { - weight: Weight::from_ref_time(0), + weight: 0, class: DispatchClass::Operational, pays_fee: Pays::No, }; @@ -1210,11 +1111,8 @@ mod tests { )); // like a InsecureFreeNormal - let free_transaction = DispatchInfo { - weight: Weight::from_ref_time(0), - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; + let free_transaction = + DispatchInfo { weight: 0, class: DispatchClass::Normal, pays_fee: Pays::Yes }; assert_noop!( ChargeTransactionPayment::::from(0).validate( &1, @@ -1230,7 +1128,7 @@ mod tests { #[test] fn signed_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() - .base_weight(Weight::from_ref_time(5)) + .base_weight(5) .balance_factor(10) .build() .execute_with(|| { @@ -1238,10 +1136,8 @@ mod tests { >::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - assert_ok!( - ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(3)), len) - ); + assert_ok!(ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(3), len)); assert_eq!( Balances::free_balance(1), 100 // original @@ -1255,7 +1151,7 @@ mod tests { #[test] fn query_info_and_fee_details_works() { - let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); let origin = 111111; let extra = (); let xt = TestXt::new(call.clone(), Some((origin, extra))); @@ -1266,103 +1162,54 @@ mod tests { let unsigned_xt = TestXt::<_, ()>::new(call, None); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); - ExtBuilder::default() - .base_weight(Weight::from_ref_time(5)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); + ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { + // all fees should be x1.5 + 
>::put(Multiplier::saturating_from_rational(3, 2)); - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ + assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 - }), - tip: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); - } - - #[test] - fn query_call_info_and_fee_details_works() { - let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); - let info = call.get_dispatch_info(); - let encoded_call = call.encode(); - let len = encoded_call.len() as u32; + + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ + }, + ); - ExtBuilder::default() - .base_weight(Weight::from_ref_time(5)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); - assert_eq!( - TransactionPayment::query_call_info(call.clone(), len), - RuntimeDispatchInfo 
{ - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * + 2 * 3 / 2 + }), + tip: 0, + }, + ); - assert_eq!( - TransactionPayment::query_call_fee_details(call, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, /* base * weight_fee */ - len_fee: len as u64, /* len * 1 */ - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ - }), - tip: 0, - }, - ); - }); + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); } #[test] fn compute_fee_works_without_multiplier() { ExtBuilder::default() - .base_weight(Weight::from_ref_time(100)) + .base_weight(100) .byte_fee(10) .balance_factor(0) .build() @@ -1372,14 +1219,14 @@ mod tests { // Tip only, no fees works let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(0), + weight: 0, class: DispatchClass::Operational, pays_fee: Pays::No, }; assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(0), + weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1390,7 +1237,7 @@ mod tests { assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(1000), + weight: 1000, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1401,7 +1248,7 @@ mod tests { #[test] fn compute_fee_works_with_multiplier() { 
ExtBuilder::default() - .base_weight(Weight::from_ref_time(100)) + .base_weight(100) .byte_fee(10) .balance_factor(0) .build() @@ -1410,7 +1257,7 @@ mod tests { >::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(0), + weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1418,7 +1265,7 @@ mod tests { // Everything works together :) let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(123), + weight: 123, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1433,7 +1280,7 @@ mod tests { #[test] fn compute_fee_works_with_negative_multiplier() { ExtBuilder::default() - .base_weight(Weight::from_ref_time(100)) + .base_weight(100) .byte_fee(10) .balance_factor(0) .build() @@ -1443,7 +1290,7 @@ mod tests { // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(0), + weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1451,7 +1298,7 @@ mod tests { // Everything works together. 
let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(123), + weight: 123, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1466,14 +1313,14 @@ mod tests { #[test] fn compute_fee_does_not_overflow() { ExtBuilder::default() - .base_weight(Weight::from_ref_time(100)) + .base_weight(100) .byte_fee(10) .balance_factor(0) .build() .execute_with(|| { // Overflow is handled let dispatch_info = DispatchInfo { - weight: Weight::MAX, + weight: Weight::max_value(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1488,14 +1335,14 @@ mod tests { fn refund_does_not_recreate_account() { ExtBuilder::default() .balance_factor(10) - .base_weight(Weight::from_ref_time(5)) + .base_weight(5) .build() .execute_with(|| { // So events are emitted System::set_block_number(10); let len = 10; let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); @@ -1505,18 +1352,20 @@ mod tests { assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(100)), - &post_info_from_weight(Weight::from_ref_time(50)), + &info_from_weight(100), + &post_info_from_weight(50), len, &Ok(()) )); assert_eq!(Balances::free_balance(2), 0); // Transfer Event - System::assert_has_event(RuntimeEvent::Balances( - pallet_balances::Event::Transfer { from: 2, to: 3, amount: 80 }, - )); + System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer { + from: 2, + to: 3, + amount: 80, + })); // Killed Event - System::assert_has_event(RuntimeEvent::System(system::Event::KilledAccount { + System::assert_has_event(Event::System(system::Event::KilledAccount { account: 2, })); }); @@ -1526,19 +1375,19 @@ mod tests { fn actual_weight_higher_than_max_refunds_nothing() { ExtBuilder::default() .balance_factor(10) - 
.base_weight(Weight::from_ref_time(5)) + .base_weight(5) .build() .execute_with(|| { let len = 10; let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(Weight::from_ref_time(100)), - &post_info_from_weight(Weight::from_ref_time(101)), + &info_from_weight(100), + &post_info_from_weight(101), len, &Ok(()) )); @@ -1550,17 +1399,14 @@ mod tests { fn zero_transfer_on_free_transaction() { ExtBuilder::default() .balance_factor(10) - .base_weight(Weight::from_ref_time(5)) + .base_weight(5) .build() .execute_with(|| { // So events are emitted System::set_block_number(10); let len = 10; - let dispatch_info = DispatchInfo { - weight: Weight::from_ref_time(100), - pays_fee: Pays::No, - class: DispatchClass::Normal, - }; + let dispatch_info = + DispatchInfo { weight: 100, pays_fee: Pays::No, class: DispatchClass::Normal }; let user = 69; let pre = ChargeTransactionPayment::::from(0) .pre_dispatch(&user, CALL, &dispatch_info, len) @@ -1575,7 +1421,7 @@ mod tests { )); assert_eq!(Balances::total_balance(&user), 0); // TransactionFeePaid Event - System::assert_has_event(RuntimeEvent::TransactionPayment( + System::assert_has_event(Event::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: user, actual_fee: 0, @@ -1589,11 +1435,11 @@ mod tests { fn refund_consistent_with_actual_weight() { ExtBuilder::default() .balance_factor(10) - .base_weight(Weight::from_ref_time(7)) + .base_weight(7) .build() .execute_with(|| { - let info = info_from_weight(Weight::from_ref_time(100)); - let post_info = post_info_from_weight(Weight::from_ref_time(33)); + let info = info_from_weight(100); + let post_info = post_info_from_weight(33); let prev_balance = 
Balances::free_balance(2); let len = 10; let tip = 5; @@ -1629,11 +1475,8 @@ mod tests { let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = DispatchInfo { - weight: Weight::from_ref_time(100), - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; let priority = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1651,7 +1494,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_ref_time(100), + weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1675,11 +1518,8 @@ mod tests { let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = DispatchInfo { - weight: Weight::from_ref_time(100), - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; let priority = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1690,7 +1530,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_ref_time(100), + weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1709,11 +1549,8 @@ mod tests { let mut priority2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = DispatchInfo { - weight: Weight::from_ref_time(100), - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; priority1 = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1722,7 +1559,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 
Weight::from_ref_time(100), + weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1749,10 +1586,10 @@ mod tests { fn post_info_can_change_pays_fee() { ExtBuilder::default() .balance_factor(10) - .base_weight(Weight::from_ref_time(7)) + .base_weight(7) .build() .execute_with(|| { - let info = info_from_weight(Weight::from_ref_time(100)); + let info = info_from_weight(100); let post_info = post_info_from_pays(Pays::No); let prev_balance = Balances::free_balance(2); let len = 10; @@ -1782,24 +1619,4 @@ mod tests { assert_eq!(refund_based_fee, actual_fee); }); } - - #[test] - fn genesis_config_works() { - ExtBuilder::default() - .with_initial_multiplier(Multiplier::from_u32(100)) - .build() - .execute_with(|| { - assert_eq!( - >::get(), - Multiplier::saturating_from_integer(100) - ); - }); - } - - #[test] - fn genesis_default_works() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(>::get(), Multiplier::saturating_from_integer(1)); - }); - } } diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index ebc9c5c5afd62..3a5fad0d66a52 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -37,8 +37,8 @@ pub trait OnChargeTransaction { /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - call: &T::RuntimeCall, - dispatch_info: &DispatchInfoOf, + call: &T::Call, + dispatch_info: &DispatchInfoOf, fee: Self::Balance, tip: Self::Balance, ) -> Result; @@ -50,8 +50,8 @@ pub trait OnChargeTransaction { /// Note: The `fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - dispatch_info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, @@ -92,8 +92,8 @@ where /// Note: The `fee` already includes the `tip`. 
fn withdraw_fee( who: &T::AccountId, - _call: &T::RuntimeCall, - _info: &DispatchInfoOf, + _call: &T::Call, + _info: &DispatchInfoOf, fee: Self::Balance, tip: Self::Balance, ) -> Result { @@ -120,8 +120,8 @@ where /// Note: The `corrected_fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - _dispatch_info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, + _dispatch_info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index d1a480b64e116..3faebfed48946 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; use sp_std::prelude::*; -use frame_support::dispatch::DispatchClass; +use frame_support::weights::{DispatchClass, Weight}; /// The base fee and adjusted weight and length fees constitute the _inclusion fee_. #[derive(Encode, Decode, Clone, Eq, PartialEq)] @@ -94,15 +94,9 @@ impl FeeDetails { #[derive(Eq, PartialEq, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[cfg_attr( - feature = "std", - serde(bound(serialize = "Balance: std::fmt::Display, Weight: Serialize")) -)] -#[cfg_attr( - feature = "std", - serde(bound(deserialize = "Balance: std::str::FromStr, Weight: Deserialize<'de>")) -)] -pub struct RuntimeDispatchInfo { +#[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] +#[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] +pub struct RuntimeDispatchInfo { /// Weight of this dispatch. pub weight: Weight, /// Class of this dispatch. 
@@ -137,18 +131,16 @@ mod serde_balance { #[cfg(test)] mod tests { use super::*; - use frame_support::weights::Weight; #[test] fn should_serialize_and_deserialize_properly_with_string() { let info = RuntimeDispatchInfo { - weight: Weight::from_ref_time(5), + weight: 5, class: DispatchClass::Normal, partial_fee: 1_000_000_u64, }; - let json_str = - r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"1000000"}"#; + let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); @@ -160,12 +152,12 @@ mod tests { #[test] fn should_serialize_and_deserialize_properly_large_value() { let info = RuntimeDispatchInfo { - weight: Weight::from_ref_time(5), + weight: 5, class: DispatchClass::Normal, partial_fee: u128::max_value(), }; - let json_str = r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index a6e177af1853d..f001ef6acd468 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -13,8 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "4.1", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +hex-literal = { version = "0.3.4", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = 
"../benchmarking" } @@ -34,10 +34,8 @@ sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = true, [features] default = ["std"] -runtime-benchmarks = ["array-bytes", "frame-benchmarking/runtime-benchmarks"] +runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "hex-literal"] std = [ - "log/std", - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", @@ -50,4 +48,3 @@ std = [ "sp-std/std", "sp-transaction-storage-proof/std", ] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs index c7fbd00fb565d..83dd37922a31f 100644 --- a/frame/transaction-storage/src/benchmarking.rs +++ b/frame/transaction-storage/src/benchmarking.rs @@ -40,75 +40,74 @@ use crate::Pallet as TransactionStorage; // build_proof(hash.as_slice(), transactions).unwrap().encode() // ``` // while hardforcing target chunk key in `build_proof` to [22, 21, 1, 0]. -const PROOF: &str = "\ - 0104000000000000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000000000000000\ - 00000000000000000000000000000000000000000000000000000000000014cd0780ffff8030\ - 2eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba0080302eb0a6d2\ - f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15\ - f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1\ - 004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e304\ - 8cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697\ - 
eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a\ - 30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302e\ - b0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b\ - 834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e7\ - 29d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c10046\ - 57e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf2\ - 06d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb1\ - 53f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba\ - bd058077778010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de\ - 808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f\ - 19117affa96e077905fe48a99723a065969c638593b7d9ab57b538438010fd81bc1359802f0b\ - 871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bc\ - cee5e71d5cf6b1faff338ad7120b0256c283008010fd81bc1359802f0b871aeb95e4410a8ec9\ - 2b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff\ - 338ad7120b0256c28380221ce17f19117affa96e077905fe48a99723a065969c638593b7d9ab\ - 57b538438010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de80\ - 8da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f19\ - 117affa96e077905fe48a99723a065969c638593b7d9ab57b53843cd0780ffff804509f59593\ - fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c00804509f59593fd47b1a9\ - 7189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba6\ - 5a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0\ - 346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f983\ - 6e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf89\ - 1a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c8045\ - 09f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd\ - 
47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189\ - 127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a56\ - 49cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb03466\ - 37f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e15\ - 5eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a93\ - 9c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939ccd0780ff\ - ff8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e\ - 776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea\ - 05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f\ - 015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d\ - 06feafa3610fc44a5b2ef543cb81008078916e776c64ccea05e958559f015c082d9d06feafa3\ - 610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b\ - 2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb81\ - 8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e77\ - 6c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05\ - e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f01\ - 5c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06\ - feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610f\ - c44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef5\ - 43cb811044010000\ -"; -fn proof() -> Vec { - array_bytes::hex2bytes_unchecked(PROOF) -} +const PROOF: &[u8] = &hex_literal::hex!( + " + 0104000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000 + 
0000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000 + 00000000000000000000000000000000000000000000000000000000000014cd0780ffff8030 + 2eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba0080302eb0a6d2 + f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15 + f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1 + 004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e304 + 8cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697 + eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a + 30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302e + b0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b + 834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e7 + 29d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c10046 + 57e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf2 + 06d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb1 + 53f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba + bd058077778010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de + 808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f + 19117affa96e077905fe48a99723a065969c638593b7d9ab57b538438010fd81bc1359802f0b + 871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bc + cee5e71d5cf6b1faff338ad7120b0256c283008010fd81bc1359802f0b871aeb95e4410a8ec9 + 2b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff + 338ad7120b0256c28380221ce17f19117affa96e077905fe48a99723a065969c638593b7d9ab + 57b538438010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de80 + 8da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f19 + 
117affa96e077905fe48a99723a065969c638593b7d9ab57b53843cd0780ffff804509f59593 + fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c00804509f59593fd47b1a9 + 7189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba6 + 5a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0 + 346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f983 + 6e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf89 + 1a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c8045 + 09f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd + 47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189 + 127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a56 + 49cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb03466 + 37f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e15 + 5eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a93 + 9c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939ccd0780ff + ff8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e + 776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea + 05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f + 015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d + 06feafa3610fc44a5b2ef543cb81008078916e776c64ccea05e958559f015c082d9d06feafa3 + 610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b + 2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb81 + 8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e77 + 6c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05 + e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f01 + 5c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06 + 
feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610f + c44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef5 + 43cb811044010000 + " +); type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { let events = System::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); + let system_event: ::Event = generic_event.into(); let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } @@ -160,8 +159,8 @@ benchmarks! { )?; } run_to_block::(StoragePeriod::::get() + T::BlockNumber::one()); - let encoded_proof = proof(); - let proof = TransactionStorageProof::decode(&mut &*encoded_proof).unwrap(); + let mut encoded_proof = PROOF; + let proof = TransactionStorageProof::decode(&mut encoded_proof).unwrap(); }: check_proof(RawOrigin::None, proof) verify { assert_last_event::(Event::ProofChecked.into()); diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index 07144c5617113..f16b8f029662b 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -92,10 +92,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// A dispatchable call. - type RuntimeCall: Parameter - + Dispatchable + type Call: Parameter + + Dispatchable + GetDispatchInfo + From>; /// The currency trait. 
@@ -245,11 +245,9 @@ pub mod pallet { let sender = ensure_signed(origin)?; let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; - let extrinsic_index = - >::extrinsic_index().ok_or(Error::::BadContext)?; - Self::apply_fee(sender, info.size)?; + let extrinsic_index = >::extrinsic_index().unwrap(); sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); let mut index = 0; diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index 8764b16c31d8d..771387ef705be 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -51,8 +51,8 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -60,7 +60,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -77,7 +77,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -87,8 +87,8 @@ impl pallet_balances::Config for Test { } impl pallet_transaction_storage::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type Currency = Balances; type FeeDestination = (); type WeightInfo = (); @@ -116,7 +116,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pub fn run_to_block(n: u64, f: impl Fn() 
-> Option) { while System::block_number() < n { if let Some(proof) = f() { - TransactionStorage::check_proof(RuntimeOrigin::none(), proof).unwrap(); + TransactionStorage::check_proof(Origin::none(), proof).unwrap(); } TransactionStorage::on_finalize(System::block_number()); System::on_finalize(System::block_number()); diff --git a/frame/transaction-storage/src/tests.rs b/frame/transaction-storage/src/tests.rs index 01b71a7851ac3..8825890ae67a2 100644 --- a/frame/transaction-storage/src/tests.rs +++ b/frame/transaction-storage/src/tests.rs @@ -31,11 +31,11 @@ fn discards_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller).into(), + RawOrigin::Signed(caller.clone()).into(), vec![0u8; 2000 as usize] )); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller).into(), + RawOrigin::Signed(caller.clone()).into(), vec![0u8; 2000 as usize] )); let proof_provider = || { @@ -74,7 +74,7 @@ fn burns_fee() { Error::::InsufficientFunds, ); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller).into(), + RawOrigin::Signed(caller.clone()).into(), vec![0u8; 2000 as usize] )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 2000 * 2 - 200); @@ -87,7 +87,7 @@ fn checks_proof() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller).into(), + RawOrigin::Signed(caller.clone()).into(), vec![0u8; MAX_DATA_SIZE as usize] )); run_to_block(10, || None); @@ -95,7 +95,7 @@ fn checks_proof() { let proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); assert_noop!( - TransactionStorage::::check_proof(RuntimeOrigin::none(), proof,), + TransactionStorage::::check_proof(Origin::none(), proof,), Error::::UnexpectedProof, ); run_to_block(11, || None); @@ -103,13 +103,13 @@ fn checks_proof() { let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap(); assert_noop!( - 
TransactionStorage::::check_proof(RuntimeOrigin::none(), invalid_proof,), + TransactionStorage::::check_proof(Origin::none(), invalid_proof,), Error::::InvalidProof, ); let proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); - assert_ok!(TransactionStorage::::check_proof(RuntimeOrigin::none(), proof)); + assert_ok!(TransactionStorage::::check_proof(Origin::none(), proof)); }); } @@ -119,13 +119,13 @@ fn renews_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller).into(), + RawOrigin::Signed(caller.clone()).into(), vec![0u8; 2000] )); let info = BlockTransactions::::get().last().unwrap().clone(); run_to_block(6, || None); assert_ok!(TransactionStorage::::renew( - RawOrigin::Signed(caller).into(), + RawOrigin::Signed(caller.clone()).into(), 1, // block 0, // transaction )); diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index 16d12aa75ab4d..b8bc4890a416e 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_transaction_storage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/transaction-storage/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/transaction-storage/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,29 +52,29 @@ pub trait WeightInfo { /// Weights for pallet_transaction_storage using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: TransactionStorage MaxTransactionSize (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// The range of component `l` is `[1, 8388608]`. + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - // Minimum execution time: 46_730 nanoseconds. 
- Weight::from_ref_time(46_922_000 as u64) - // Standard Error: 2 - .saturating_add(Weight::from_ref_time(5_601 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (0 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: TransactionStorage Transactions (r:1 w:0) - // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - // Minimum execution time: 56_802 nanoseconds. - Weight::from_ref_time(58_670_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (50_978_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -85,38 +82,37 @@ impl WeightInfo for SubstrateWeight { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - // Minimum execution time: 74_016 nanoseconds. 
- Weight::from_ref_time(94_111_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (106_990_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { + // Storage: TransactionStorage MaxTransactionSize (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// The range of component `l` is `[1, 8388608]`. + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - // Minimum execution time: 46_730 nanoseconds. - Weight::from_ref_time(46_922_000 as u64) - // Standard Error: 2 - .saturating_add(Weight::from_ref_time(5_601 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (0 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: TransactionStorage Transactions (r:1 w:0) - // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - // Minimum execution time: 56_802 nanoseconds. 
- Weight::from_ref_time(58_670_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (50_978_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -124,9 +120,8 @@ impl WeightInfo for () { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - // Minimum execution time: 74_016 nanoseconds. - Weight::from_ref_time(94_111_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (106_990_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } } diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 08b0acdba5deb..56c2fcdab58f7 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -34,7 +34,6 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index d718a5fb89521..ddb952383370d 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -34,7 +34,7 @@ const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. 
fn setup_proposal, I: 'static>( u: u32, -) -> (T::AccountId, BalanceOf, AccountIdLookupOf) { +) -> (T::AccountId, BalanceOf, ::Source) { let caller = account("caller", u, SEED); let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); let _ = T::Currency::make_free_balance_be(&caller, value); @@ -61,7 +61,7 @@ fn setup_pot_account, I: 'static>() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -99,8 +99,7 @@ benchmarks_instance_pallet! { beneficiary_lookup )?; let proposal_id = Treasury::::proposal_count() - 1; - let reject_origin = T::RejectOrigin::successful_origin(); - }: _(reject_origin, proposal_id) + }: _(RawOrigin::Root, proposal_id) approve_proposal { let p in 0 .. T::MaxApprovals::get() - 1; @@ -112,8 +111,7 @@ benchmarks_instance_pallet! { beneficiary_lookup )?; let proposal_id = Treasury::::proposal_count() - 1; - let approve_origin = T::ApproveOrigin::successful_origin(); - }: _(approve_origin, proposal_id) + }: _(RawOrigin::Root, proposal_id) remove_approval { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); @@ -124,8 +122,7 @@ benchmarks_instance_pallet! { )?; let proposal_id = Treasury::::proposal_count() - 1; Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; - let reject_origin = T::RejectOrigin::successful_origin(); - }: _(reject_origin, proposal_id) + }: _(RawOrigin::Root, proposal_id) on_initialize_proposals { let p in 0 .. 
T::MaxApprovals::get(); diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 21b4d2b769c8b..6730f985b16e0 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -93,7 +93,6 @@ pub type PositiveImbalanceOf = <>::Currency as Currenc pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. /// There is an expectation that the implementer of this trait will correctly manage @@ -149,14 +148,13 @@ pub mod pallet { type Currency: Currency + ReservableCurrency; /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; + type ApproveOrigin: EnsureOrigin; /// Origin from which rejections must come. - type RejectOrigin: EnsureOrigin; + type RejectOrigin: EnsureOrigin; /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. type OnSlash: OnUnbalanced>; @@ -204,7 +202,7 @@ pub mod pallet { /// The origin required for approving spends from the treasury outside of the proposal /// process. The `Success` value is the maximum amount that this origin is allowed to /// spend at a time. - type SpendOrigin: EnsureOrigin>; + type SpendOrigin: EnsureOrigin>; } /// Number of proposals that have been made. 
@@ -320,7 +318,7 @@ pub mod pallet { if (n % T::SpendPeriod::get()).is_zero() { Self::spend_funds() } else { - Weight::zero() + 0 } } } @@ -340,7 +338,7 @@ pub mod pallet { pub fn propose_spend( origin: OriginFor, #[pallet::compact] value: BalanceOf, - beneficiary: AccountIdLookupOf, + beneficiary: ::Source, ) -> DispatchResult { let proposer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -421,7 +419,7 @@ pub mod pallet { pub fn spend( origin: OriginFor, #[pallet::compact] amount: BalanceOf, - beneficiary: AccountIdLookupOf, + beneficiary: ::Source, ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -501,7 +499,7 @@ impl, I: 'static> Pallet { /// Spend some money! returns number of approvals before spend. pub fn spend_funds() -> Weight { - let mut total_weight = Weight::zero(); + let mut total_weight: Weight = Zero::zero(); let mut budget_remaining = Self::pot(); Self::deposit_event(Event::Spending { budget_remaining }); diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 9cfe147ec4ce4..61eafb652427b 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -19,6 +19,8 @@ #![cfg(test)] +use std::cell::RefCell; + use sp_core::H256; use sp_runtime::{ testing::Header, @@ -53,23 +55,23 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -86,33 +88,36 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); } pub struct TestSpendOrigin; -impl frame_support::traits::EnsureOrigin for TestSpendOrigin { +impl frame_support::traits::EnsureOrigin for TestSpendOrigin { type Success = u64; - fn try_origin(o: RuntimeOrigin) -> Result { - Result::, RuntimeOrigin>::from(o).and_then(|o| match o { + fn try_origin(o: Origin) -> Result { + Result::, Origin>::from(o).and_then(|o| match o { frame_system::RawOrigin::Root => Ok(u64::max_value()), frame_system::RawOrigin::Signed(10) => Ok(5), frame_system::RawOrigin::Signed(11) => Ok(10), frame_system::RawOrigin::Signed(12) => Ok(20), frame_system::RawOrigin::Signed(13) => Ok(50), - r => Err(RuntimeOrigin::from(r)), + r => Err(Origin::from(r)), }) } #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(RuntimeOrigin::root()) + fn try_successful_origin() -> Result { + Ok(Origin::root()) } } @@ -121,7 +126,7 @@ impl Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -158,21 +163,21 @@ fn genesis_config_works() { #[test] fn spend_origin_permissioning_works() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::spend(RuntimeOrigin::signed(1), 1, 1), BadOrigin); + assert_noop!(Treasury::spend(Origin::signed(1), 1, 1), BadOrigin); assert_noop!( - Treasury::spend(RuntimeOrigin::signed(10), 6, 1), + Treasury::spend(Origin::signed(10), 6, 1), Error::::InsufficientPermission ); assert_noop!( - Treasury::spend(RuntimeOrigin::signed(11), 11, 1), + Treasury::spend(Origin::signed(11), 11, 1), Error::::InsufficientPermission ); assert_noop!( - Treasury::spend(RuntimeOrigin::signed(12), 21, 
1), + Treasury::spend(Origin::signed(12), 21, 1), Error::::InsufficientPermission ); assert_noop!( - Treasury::spend(RuntimeOrigin::signed(13), 51, 1), + Treasury::spend(Origin::signed(13), 51, 1), Error::::InsufficientPermission ); }); @@ -183,13 +188,13 @@ fn spend_origin_works() { new_test_ext().execute_with(|| { // Check that accumulate works when we have Some value in Dummy already. Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(11), 10, 6)); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(12), 20, 6)); - assert_ok!(Treasury::spend(RuntimeOrigin::signed(13), 50, 6)); + assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(Origin::signed(11), 10, 6)); + assert_ok!(Treasury::spend(Origin::signed(12), 20, 6)); + assert_ok!(Treasury::spend(Origin::signed(13), 50, 6)); >::on_initialize(1); assert_eq!(Balances::free_balance(6), 0); @@ -212,7 +217,7 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); assert_eq!(Balances::free_balance(0), 99); assert_eq!(Balances::reserved_balance(0), 1); }); @@ -221,7 +226,7 @@ fn spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 
100, 3)); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); }); @@ -231,7 +236,7 @@ fn spend_proposal_takes_proportional_deposit() { fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3), + Treasury::propose_spend(Origin::signed(2), 100, 3), Error::::InsufficientProposersBalance, ); }); @@ -242,8 +247,8 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -269,8 +274,8 @@ fn rejected_spend_proposal_ignored_on_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 0); @@ -283,32 +288,23 @@ fn reject_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); - assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), - Error::::InvalidIndex - ); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + 
assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } #[test] fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!( - Treasury::reject_proposal(RuntimeOrigin::root(), 0), - Error::::InvalidIndex - ); + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } #[test] fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), - Error::::InvalidIndex - ); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } @@ -317,12 +313,9 @@ fn accept_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); - assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), - Error::::InvalidIndex - ); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } @@ -332,8 +325,8 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -347,8 +340,8 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - 
assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -369,14 +362,14 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -399,10 +392,10 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); >::on_initialize(2); 
assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -444,14 +437,14 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&0, u64::MAX); for _ in 0..::MaxApprovals::get() { - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); } // One too many will fail - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); assert_noop!( - Treasury::approve_proposal(RuntimeOrigin::root(), 0), + Treasury::approve_proposal(Origin::root(), 0), Error::::TooManyApprovals ); }); @@ -462,14 +455,14 @@ fn remove_already_removed_approval_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); assert_eq!(Treasury::approvals(), vec![0]); - assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::remove_approval(Origin::root(), 0)); assert_eq!(Treasury::approvals(), vec![]); assert_noop!( - Treasury::remove_approval(RuntimeOrigin::root(), 0), + Treasury::remove_approval(Origin::root(), 0), Error::::ProposalNotApproved ); }); diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 3ee071ac700f1..f6b5414a05652 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_treasury //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/treasury/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/treasury/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,114 +55,106 @@ pub trait WeightInfo { /// Weights for pallet_treasury using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: Treasury ProposalCount (r:1 w:1) + // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - // Minimum execution time: 137 nanoseconds. - Weight::from_ref_time(153_000 as u64) + (22_063_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - // Minimum execution time: 31_437 nanoseconds. 
- Weight::from_ref_time(32_241_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (26_473_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - // Minimum execution time: 38_351 nanoseconds. - Weight::from_ref_time(38_828_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (29_955_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) - /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { - // Minimum execution time: 11_937 nanoseconds. - Weight::from_ref_time(15_541_763 as u64) - // Standard Error: 1_036 - .saturating_add(Weight::from_ref_time(128_326 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (10_786_000 as Weight) + // Standard Error: 0 + .saturating_add((110_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - // Minimum execution time: 9_611 nanoseconds. 
- Weight::from_ref_time(10_012_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (6_647_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) - /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { - // Minimum execution time: 43_016 nanoseconds. - Weight::from_ref_time(56_538_751 as u64) - // Standard Error: 14_890 - .saturating_add(Weight::from_ref_time(26_789_120 as u64).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(p as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(p as u64))) + (25_805_000 as Weight) + // Standard Error: 18_000 + .saturating_add((28_473_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } } // For backwards compatibility and tests impl WeightInfo for () { + // Storage: Treasury ProposalCount (r:1 w:1) + // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - // Minimum execution time: 137 nanoseconds. 
- Weight::from_ref_time(153_000 as u64) + (22_063_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - // Minimum execution time: 31_437 nanoseconds. - Weight::from_ref_time(32_241_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (26_473_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - // Minimum execution time: 38_351 nanoseconds. - Weight::from_ref_time(38_828_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (29_955_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) - /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { - // Minimum execution time: 11_937 nanoseconds. - Weight::from_ref_time(15_541_763 as u64) - // Standard Error: 1_036 - .saturating_add(Weight::from_ref_time(128_326 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (10_786_000 as Weight) + // Standard Error: 0 + .saturating_add((110_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - // Minimum execution time: 9_611 nanoseconds. 
- Weight::from_ref_time(10_012_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (6_647_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) - /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { - // Minimum execution time: 43_016 nanoseconds. - Weight::from_ref_time(56_538_751 as u64) - // Standard Error: 14_890 - .saturating_add(Weight::from_ref_time(26_789_120 as u64).saturating_mul(p as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(p as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(p as u64))) + (25_805_000 as Weight) + // Standard Error: 18_000 + .saturating_add((28_473_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } } diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml index 51b6f91784594..075de318c2a05 100644 --- a/frame/try-runtime/Cargo.toml +++ b/frame/try-runtime/Cargo.toml @@ -13,8 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"]} -frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-support = { version = "4.0.0-dev", 
default-features = false, path = "../support" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -22,12 +21,8 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [features] default = [ "std" ] std = [ - "codec/std", "frame-support/std", "sp-api/std", "sp-runtime/std", "sp-std/std", ] -try-runtime = [ - "frame-support/try-runtime", -] diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs index ed1247bd8e6f2..e4f01d40c9d42 100644 --- a/frame/try-runtime/src/lib.rs +++ b/frame/try-runtime/src/lib.rs @@ -18,9 +18,7 @@ //! Supporting types for try-runtime, testing and dry-running commands. #![cfg_attr(not(feature = "std"), no_std)] -#![cfg(feature = "try-runtime")] -pub use frame_support::traits::TryStateSelect; use frame_support::weights::Weight; sp_api::decl_runtime_apis! { @@ -39,6 +37,6 @@ sp_api::decl_runtime_apis! { /// /// This is only sensible where the incoming block is from a different network, yet it has /// the same block format as the runtime implementing this API. 
- fn execute_block(block: Block, state_root_check: bool, try_state: TryStateSelect) -> Weight; + fn execute_block_no_check(block: Block) -> Weight; } } diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml index 90a43861ebc4a..59d609b8033e7 100644 --- a/frame/uniques/Cargo.toml +++ b/frame/uniques/Cargo.toml @@ -33,7 +33,7 @@ sp-std = { version = "4.0.0", path = "../../primitives/std" } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index ab34558f95eb3..86247fb964ff5 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -37,22 +37,20 @@ use crate::Pallet as Uniques; const SEED: u32 = 0; fn create_collection, I: 'static>( -) -> (T::CollectionId, T::AccountId, AccountIdLookupOf) { +) -> (T::CollectionId, T::AccountId, ::Source) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - assert!(Uniques::::force_create( - SystemOrigin::Root.into(), - collection, - caller_lookup.clone(), - false, - ) - .is_ok()); + assert!( + Uniques::::force_create(SystemOrigin::Root.into(), caller_lookup.clone(), false,) + .is_ok() + ); (collection, caller, caller_lookup) } -fn add_collection_metadata, I: 'static>() -> (T::AccountId, AccountIdLookupOf) { +fn add_collection_metadata, I: 'static>( +) -> (T::AccountId, ::Source) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -70,7 +68,7 @@ fn add_collection_metadata, I: 'static>() -> (T::AccountId, Account fn mint_item, I: 'static>( index: u16, -) -> (T::ItemId, T::AccountId, AccountIdLookupOf) { +) -> (T::ItemId, T::AccountId, ::Source) { let caller 
= Collection::::get(T::Helper::collection(0)).unwrap().admin; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -89,7 +87,7 @@ fn mint_item, I: 'static>( fn add_item_metadata, I: 'static>( item: T::ItemId, -) -> (T::AccountId, AccountIdLookupOf) { +) -> (T::AccountId, ::Source) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -108,7 +106,7 @@ fn add_item_metadata, I: 'static>( fn add_item_attribute, I: 'static>( item: T::ItemId, -) -> (BoundedVec, T::AccountId, AccountIdLookupOf) { +) -> (BoundedVec, T::AccountId, ::Source) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -126,9 +124,9 @@ fn add_item_attribute, I: 'static>( (key, caller, caller_lookup) } -fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { let events = frame_system::Pallet::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); @@ -142,7 +140,7 @@ benchmarks_instance_pallet! { whitelist_account!(caller); let admin = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - let call = Call::::create { collection, admin }; + let call = Call::::create { admin }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); @@ -151,7 +149,7 @@ benchmarks_instance_pallet! 
{ force_create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - }: _(SystemOrigin::Root, T::Helper::collection(0), caller_lookup, true) + }: _(SystemOrigin::Root, caller_lookup, true) verify { assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); } @@ -407,6 +405,16 @@ benchmarks_instance_pallet! { }.into()); } + try_increment_id { + let (_, caller, _) = create_collection::(); + Uniques::::set_next_id(0); + }: _(SystemOrigin::Signed(caller.clone())) + verify { + assert_last_event::(Event::NextCollectionIdIncremented { + next_id: 1u32.into() + }.into()); + } + set_price { let (collection, caller, _) = create_collection::(); let (item, ..) = mint_item::(0); diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 99d16e0d903de..2409632ff646d 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -48,12 +48,6 @@ impl, I: 'static> Pallet { Account::::insert((&dest, &collection, &item), ()); let origin = details.owner; details.owner = dest; - - // The approved account has to be reset to None, because otherwise pre-approve attack would - // be possible, where the owner can approve his second account before making the transaction - // and then claiming the item back. 
- details.approved = None; - Item::::insert(&collection, &item, &details); ItemPriceOf::::remove(&collection, &item); @@ -94,7 +88,12 @@ impl, I: 'static> Pallet { }, ); + let next_id = collection.saturating_add(1u32.into()); + CollectionAccount::::insert(&owner, &collection, ()); + NextCollectionId::::set(next_id); + + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); Self::deposit_event(event); Ok(()) } @@ -214,6 +213,16 @@ impl, I: 'static> Pallet { Ok(()) } + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn set_next_id(count: u32) { + NextCollectionId::::set(count.into()); + } + + #[cfg(test)] + pub fn get_next_id() -> T::CollectionId { + NextCollectionId::::get() + } + pub fn do_set_price( collection: T::CollectionId, item: T::ItemId, @@ -225,7 +234,11 @@ impl, I: 'static> Pallet { ensure!(details.owner == sender, Error::::NoPermission); if let Some(ref price) = price { - ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); + ItemPriceOf::::insert( + &collection, + &item, + (price.clone(), whitelisted_buyer.clone()), + ); Self::deposit_event(Event::ItemPriceSet { collection, item, diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index 47036f8b0c37b..4162d0a0879ff 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -19,7 +19,6 @@ use super::*; use frame_support::{ - storage::KeyPrefixIterator, traits::{ tokens::nonfungibles::{Inspect, Transfer, *}, Get, @@ -159,31 +158,25 @@ impl, I: 'static> Transfer for Pallet { } impl, I: 'static> InspectEnumerable for Pallet { - type CollectionsIterator = KeyPrefixIterator<>::CollectionId>; - type ItemsIterator = KeyPrefixIterator<>::ItemId>; - type OwnedIterator = - KeyPrefixIterator<(>::CollectionId, >::ItemId)>; - type OwnedInCollectionIterator = KeyPrefixIterator<>::ItemId>; - /// Returns an iterator of the collections in existence. 
/// /// NOTE: iterating this list invokes a storage read per item. - fn collections() -> Self::CollectionsIterator { - CollectionMetadataOf::::iter_keys() + fn collections() -> Box> { + Box::new(CollectionMetadataOf::::iter_keys()) } /// Returns an iterator of the items of a `collection` in existence. /// /// NOTE: iterating this list invokes a storage read per item. - fn items(collection: &Self::CollectionId) -> Self::ItemsIterator { - ItemMetadataOf::::iter_key_prefix(collection) + fn items(collection: &Self::CollectionId) -> Box> { + Box::new(ItemMetadataOf::::iter_key_prefix(collection)) } /// Returns an iterator of the items of all collections owned by `who`. /// /// NOTE: iterating this list invokes a storage read per item. - fn owned(who: &T::AccountId) -> Self::OwnedIterator { - Account::::iter_key_prefix((who,)) + fn owned(who: &T::AccountId) -> Box> { + Box::new(Account::::iter_key_prefix((who,))) } /// Returns an iterator of the items of `collection` owned by `who`. @@ -192,7 +185,7 @@ impl, I: 'static> InspectEnumerable for Pallet fn owned_in_collection( collection: &Self::CollectionId, who: &T::AccountId, - ) -> Self::OwnedInCollectionIterator { - Account::::iter_key_prefix((who, collection)) + ) -> Box> { + Box::new(Account::::iter_key_prefix((who, collection))) } } diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index ec48b5343d217..e1f21fd14a2a4 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -63,8 +63,6 @@ pub use pallet::*; pub use types::*; pub use weights::WeightInfo; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -94,11 +92,15 @@ pub mod pallet { /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// Identifier for the collection of item. 
- type CollectionId: Member + Parameter + MaxEncodedLen + Copy; + type CollectionId: Member + + Parameter + + MaxEncodedLen + + Copy + + Default + + AtLeast32BitUnsigned; /// The type used to identify a unique item within a collection. type ItemId: Member + Parameter + MaxEncodedLen + Copy; @@ -130,14 +132,14 @@ pub mod pallet { /// The origin which may forcibly create or destroy an item or otherwise alter privileged /// attributes. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// Standard collection creation is only allowed if the origin attempting it and the /// collection are in this set. type CreateOrigin: EnsureOriginWithArg< - Self::RuntimeOrigin, - Self::CollectionId, Success = Self::AccountId, + Self::Origin, + Self::CollectionId, >; /// Locker trait to enable Locking mechanism downstream. @@ -294,6 +296,12 @@ pub mod pallet { pub(super) type CollectionMaxSupply, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; + #[pallet::storage] + /// Stores the `CollectionId` that is going to be used for the next collection. + /// This gets incremented by 1 whenever a new collection is created. + pub(super) type NextCollectionId, I: 'static = ()> = + StorageValue<_, T::CollectionId, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -385,6 +393,8 @@ pub mod pallet { OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, /// Max supply has been set for a collection. CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// Event gets emmited when the `NextCollectionId` gets incremented. + NextCollectionIdIncremented { next_id: T::CollectionId }, /// The price was set for the instance. ItemPriceSet { collection: T::CollectionId, @@ -436,6 +446,10 @@ pub mod pallet { MaxSupplyAlreadySet, /// The provided max supply is less to the amount of items a collection already has. 
MaxSupplyTooSmall, + /// The `CollectionId` in `NextCollectionId` is not being used. + /// + /// This means that you can directly proceed to call `create`. + NextIdNotUsed, /// The given item ID is unknown. UnknownItem, /// Item is not for sale. @@ -464,12 +478,11 @@ pub mod pallet { /// /// This new collection has no items initially and its owner is the origin. /// - /// The origin must conform to the configured `CreateOrigin` and have sufficient funds free. + /// The origin must be Signed and the sender must have sufficient funds free. /// /// `ItemDeposit` funds of sender are reserved. /// /// Parameters: - /// - `collection`: The identifier of the new collection. This must not be currently in use. /// - `admin`: The admin of this collection. The admin is the initial address of each /// member of the collection's admin team. /// @@ -479,9 +492,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::create())] pub fn create( origin: OriginFor, - collection: T::CollectionId, - admin: AccountIdLookupOf, + admin: ::Source, ) -> DispatchResult { + let collection = NextCollectionId::::get(); + let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; @@ -503,7 +517,6 @@ pub mod pallet { /// /// Unlike `create`, no funds are reserved. /// - /// - `collection`: The identifier of the new item. This must not be currently in use. /// - `owner`: The owner of this collection of items. 
The owner has full superuser /// permissions /// over this item, but may later change and configure the permissions using @@ -515,13 +528,14 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, - collection: T::CollectionId, - owner: AccountIdLookupOf, + owner: ::Source, free_holding: bool, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; + let collection = NextCollectionId::::get(); + Self::do_create_collection( collection, owner.clone(), @@ -532,6 +546,31 @@ pub mod pallet { ) } + /// Increments the `CollectionId` stored in `NextCollectionId`. + /// + /// This is only callable when the next `CollectionId` is already being + /// used for some other collection. + /// + /// The origin must be Signed and the sender must have sufficient funds + /// free. + /// + /// Emits `NextCollectionIdIncremented` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::try_increment_id())] + pub fn try_increment_id(origin: OriginFor) -> DispatchResult { + ensure_signed(origin)?; + ensure!( + Collection::::contains_key(NextCollectionId::::get()), + Error::::NextIdNotUsed + ); + + let next_id = NextCollectionId::::get().saturating_add(1u32.into()); + NextCollectionId::::set(next_id); + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); + Ok(()) + } + /// Destroy a collection of fungible items. 
/// /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the @@ -587,7 +626,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - owner: AccountIdLookupOf, + owner: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -616,7 +655,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - check_owner: Option>, + check_owner: Option<::Source>, ) -> DispatchResult { let origin = ensure_signed(origin)?; let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; @@ -634,8 +673,6 @@ pub mod pallet { /// Move an item from the sender account to another. /// - /// This resets the approved account of the item. - /// /// Origin must be Signed and the signing account must be either: /// - the Admin of the `collection`; /// - the Owner of the `item`; @@ -654,7 +691,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - dest: AccountIdLookupOf, + dest: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; @@ -866,7 +903,7 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, collection: T::CollectionId, - owner: AccountIdLookupOf, + owner: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -914,9 +951,9 @@ pub mod pallet { pub fn set_team( origin: OriginFor, collection: T::CollectionId, - issuer: AccountIdLookupOf, - admin: AccountIdLookupOf, - freezer: AccountIdLookupOf, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, ) -> DispatchResult { let origin = ensure_signed(origin)?; let issuer = T::Lookup::lookup(issuer)?; @@ -938,15 +975,12 @@ pub mod pallet { /// Approve an item to be transferred by a delegated third-party account. 
/// - /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be - /// either the owner of the `item` or the admin of the collection. + /// Origin must be Signed and must be the owner of the `item`. /// /// - `collection`: The collection of the item to be approved for delegated transfer. /// - `item`: The item of the item to be approved for delegated transfer. /// - `delegate`: The account to delegate permission to transfer the item. /// - /// Important NOTE: The `approved` account gets reset after each transfer. - /// /// Emits `ApprovedTransfer` on success. /// /// Weight: `O(1)` @@ -955,7 +989,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - delegate: AccountIdLookupOf, + delegate: ::Source, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1008,7 +1042,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - maybe_check_delegate: Option>, + maybe_check_delegate: Option<::Source>, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1059,10 +1093,10 @@ pub mod pallet { pub fn force_item_status( origin: OriginFor, collection: T::CollectionId, - owner: AccountIdLookupOf, - issuer: AccountIdLookupOf, - admin: AccountIdLookupOf, - freezer: AccountIdLookupOf, + owner: ::Source, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, free_holding: bool, is_frozen: bool, ) -> DispatchResult { @@ -1500,7 +1534,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, price: Option>, - whitelisted_buyer: Option>, + whitelisted_buyer: Option<::Source>, ) -> DispatchResult { let origin = ensure_signed(origin)?; let whitelisted_buyer = whitelisted_buyer.map(T::Lookup::lookup).transpose()?; diff --git a/frame/uniques/src/migration.rs b/frame/uniques/src/migration.rs index 8a2a0ef808d90..d301f0a3d1eb1 100644 --- a/frame/uniques/src/migration.rs +++ 
b/frame/uniques/src/migration.rs @@ -17,7 +17,10 @@ //! Various pieces of common functionality. use super::*; -use frame_support::traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}; +use frame_support::{ + traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}, + weights::Weight, +}; /// Migrate the pallet storage to v1. pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfoAccess>( @@ -42,7 +45,7 @@ pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfo on_chain_storage_version, ); // calculate and return migration weights - T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) + T::DbWeight::get().reads_writes(count as Weight + 1, count as Weight + 1) } else { log::warn!( target: "runtime::uniques", diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs index f6684f86de7f5..8a62f3eabfea5 100644 --- a/frame/uniques/src/mock.rs +++ b/frame/uniques/src/mock.rs @@ -50,8 +50,8 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; + type Origin = Origin; + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -59,7 +59,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -76,7 +76,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -103,7 +103,7 @@ impl pallet_assets::Config for Test { } impl Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type CollectionId = 
u32; type ItemId = u32; type Currency = Balances; diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index 5078b08ff5d42..84c6dc4557ea8 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -74,7 +74,7 @@ fn events() -> Vec> { let result = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let mock::RuntimeEvent::Uniques(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let mock::Event::Uniques(inner) = e { Some(inner) } else { None }) .collect::>(); System::reset_events(); @@ -92,14 +92,14 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 1, 2, true)); + assert_ok!(Uniques::force_create(Origin::root(), 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 1, 69, 1)); + assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ -108,37 +108,32 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Uniques::create(Origin::signed(1), 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0, 0], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); assert_eq!(Balances::reserved_balance(&1), 5); 
assert!(CollectionMetadataOf::::contains_key(0)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 10)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 10)); assert_eq!(Balances::reserved_balance(&1), 6); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 69, 20)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 20)); assert_eq!(Balances::reserved_balance(&1), 7); assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); assert_eq!(Collection::::get(0).unwrap().items, 2); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); assert_eq!(Balances::reserved_balance(&1), 10); assert!(ItemMetadataOf::::contains_key(0, 42)); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![69, 69], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); assert_eq!(Balances::reserved_balance(&1), 13); assert!(ItemMetadataOf::::contains_key(0, 69)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_eq!(w.items, 2); assert_eq!(w.item_metadatas, 2); - assert_ok!(Uniques::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Collection::::contains_key(0)); @@ -156,19 +151,19 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Uniques::create(Origin::signed(1), 1)); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_noop!(Uniques::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + 
assert_noop!(Uniques::destroy(Origin::signed(1), 0, w), Error::::BadWitness); }); } #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(Uniques::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); @@ -178,66 +173,54 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); assert_eq!(items(), vec![(3, 0, 42)]); - assert_noop!( - Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 4), - Error::::NoPermission - ); + assert_noop!(Uniques::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(3), 0, 42, 2)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 4)); + assert_ok!(Uniques::approve_transfer(Origin::signed(3), 0, 42, 2)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 4)); }); } #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::freeze(RuntimeOrigin::signed(1), 0, 42)); - assert_noop!(Uniques::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + 
assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); + assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Uniques::thaw(RuntimeOrigin::signed(1), 0, 42)); - assert_ok!(Uniques::freeze_collection(RuntimeOrigin::signed(1), 0)); - assert_noop!(Uniques::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Uniques::thaw(Origin::signed(1), 0, 42)); + assert_ok!(Uniques::freeze_collection(Origin::signed(1), 0)); + assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Uniques::thaw_collection(RuntimeOrigin::signed(1), 0)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::thaw_collection(Origin::signed(1), 0)); + assert_ok!(Uniques::transfer(Origin::signed(1), 0, 42, 2)); }); } #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); - assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); - assert_noop!( - Uniques::transfer_ownership(RuntimeOrigin::signed(2), 0, 2), - Error::::NoPermission - ); - assert_noop!( - Uniques::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), - Error::::NoPermission - ); - assert_noop!(Uniques::freeze(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Uniques::thaw(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!( - Uniques::mint(RuntimeOrigin::signed(2), 0, 69, 2), - Error::::NoPermission - ); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); assert_noop!( - Uniques::burn(RuntimeOrigin::signed(2), 0, 42, None), + 
Uniques::transfer_ownership(Origin::signed(2), 0, 2), Error::::NoPermission ); + assert_noop!(Uniques::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Uniques::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); + assert_noop!(Uniques::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_noop!(Uniques::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); + assert_noop!(Uniques::destroy(Origin::signed(2), 0, w), Error::::NoPermission); }); } @@ -247,14 +230,14 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Uniques::create(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Uniques::create(Origin::signed(1), 1)); assert_eq!(collections(), vec![(1, 0)]); assert_noop!( - Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 2), + Uniques::transfer_ownership(Origin::signed(1), 0, 2), Error::::Unaccepted ); - assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); - assert_ok!(Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); + assert_ok!(Uniques::transfer_ownership(Origin::signed(1), 0, 2)); assert_eq!(collections(), vec![(2, 0)]); assert_eq!(Balances::total_balance(&1), 98); @@ -262,23 +245,18 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 2); - assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(1), Some(0))); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(1), Some(0))); assert_noop!( - Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 1), + 
Uniques::transfer_ownership(Origin::signed(1), 0, 1), Error::::NoPermission ); // Mint and set metadata now and make sure that deposit gets transferred back. - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(2), - 0, - bvec![0u8; 20], - false - )); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false)); - assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); - assert_ok!(Uniques::transfer_ownership(RuntimeOrigin::signed(2), 0, 3)); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Uniques::set_accept_ownership(Origin::signed(3), Some(0))); + assert_ok!(Uniques::transfer_ownership(Origin::signed(2), 0, 3)); assert_eq!(collections(), vec![(3, 0)]); assert_eq!(Balances::total_balance(&2), 57); assert_eq!(Balances::total_balance(&3), 145); @@ -288,7 +266,7 @@ fn transfer_owner_should_work() { // 2's acceptence from before is reset when it became owner, so it cannot be transfered // without a fresh acceptance. 
assert_noop!( - Uniques::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), + Uniques::transfer_ownership(Origin::signed(3), 0, 2), Error::::Unaccepted ); }); @@ -297,14 +275,14 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); - - assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 0, 42, 2)); - assert_ok!(Uniques::freeze(RuntimeOrigin::signed(4), 0, 42)); - assert_ok!(Uniques::thaw(RuntimeOrigin::signed(3), 0, 42)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); - assert_ok!(Uniques::burn(RuntimeOrigin::signed(3), 0, 42, None)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); + assert_ok!(Uniques::freeze(Origin::signed(4), 0, 42)); + assert_ok!(Uniques::thaw(Origin::signed(3), 0, 42)); + assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 3)); + assert_ok!(Uniques::burn(Origin::signed(3), 0, 42, None)); }); } @@ -313,89 +291,59 @@ fn set_collection_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown item assert_noop!( - Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20], false), + Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); // Cannot add metadata to unowned item assert_noop!( - Uniques::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20], false), + Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit 
Balances::make_free_balance_be(&1, 30); - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 20], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false)); assert_eq!(Balances::free_balance(&1), 9); assert!(CollectionMetadataOf::::contains_key(0)); // Force origin works, too. - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::root(), - 0, - bvec![0u8; 18], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 15], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 25], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 40], false), + Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0u8; 15], - true - )); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); assert_noop!( - Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15], false), + Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), Error::::Frozen, ); assert_noop!( - Uniques::clear_collection_metadata(RuntimeOrigin::signed(1), 0), + Uniques::clear_collection_metadata(Origin::signed(1), 0), Error::::Frozen ); // Clear Metadata - 
assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::root(), - 0, - bvec![0u8; 15], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 15], false)); assert_noop!( - Uniques::clear_collection_metadata(RuntimeOrigin::signed(2), 0), + Uniques::clear_collection_metadata(Origin::signed(2), 0), Error::::NoPermission ); assert_noop!( - Uniques::clear_collection_metadata(RuntimeOrigin::signed(1), 1), + Uniques::clear_collection_metadata(Origin::signed(1), 1), Error::::UnknownCollection ); - assert_ok!(Uniques::clear_collection_metadata(RuntimeOrigin::signed(1), 0)); + assert_ok!(Uniques::clear_collection_metadata(Origin::signed(1), 0)); assert!(!CollectionMetadataOf::::contains_key(0)); }); } @@ -406,56 +354,53 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( - Uniques::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false), + Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); assert_eq!(Balances::free_balance(&1), 8); assert!(ItemMetadataOf::::contains_key(0, 42)); // Force origin works, too. 
- assert_ok!(Uniques::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 18], false)); + assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); assert_eq!(Balances::free_balance(&1), 13); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); assert_eq!(Balances::free_balance(&1), 3); // Cannot over-reserve assert_noop!( - Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 40], false), + Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); assert_noop!( - Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false), + Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), Error::::Frozen, ); - assert_noop!( - Uniques::clear_metadata(RuntimeOrigin::signed(1), 0, 42), - Error::::Frozen - ); + assert_noop!(Uniques::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); // Clear Metadata - assert_ok!(Uniques::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); assert_noop!( - Uniques::clear_metadata(RuntimeOrigin::signed(2), 0, 42), + Uniques::clear_metadata(Origin::signed(2), 0, 42), Error::::NoPermission ); assert_noop!( - Uniques::clear_metadata(RuntimeOrigin::signed(1), 1, 42), + Uniques::clear_metadata(Origin::signed(1), 1, 42), Error::::UnknownCollection ); - 
assert_ok!(Uniques::clear_metadata(RuntimeOrigin::signed(1), 0, 42)); + assert_ok!(Uniques::clear_metadata(Origin::signed(1), 0, 42)); assert!(!ItemMetadataOf::::contains_key(0, 42)); }); } @@ -465,23 +410,11 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); - assert_ok!(Uniques::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - Some(0), - bvec![0], - bvec![0] - )); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - Some(0), - bvec![1], - bvec![0] - )); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); assert_eq!( attributes(0), vec![ @@ -492,13 +425,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - None, - bvec![0], - bvec![0; 10] - )); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); assert_eq!( attributes(0), vec![ @@ -509,7 +436,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 18); - assert_ok!(Uniques::clear_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1])); + assert_ok!(Uniques::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); assert_eq!( attributes(0), vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] @@ -517,7 +444,7 @@ fn set_attribute_should_work() { assert_eq!(Balances::reserved_balance(1), 15); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Uniques::destroy(RuntimeOrigin::signed(1), 0, w)); + 
assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); assert_eq!(attributes(0), vec![]); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -528,23 +455,11 @@ fn set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); - assert_ok!(Uniques::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - Some(0), - bvec![0], - bvec![0] - )); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - Some(1), - bvec![0], - bvec![0] - )); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); assert_eq!( attributes(0), vec![ @@ -555,33 +470,15 @@ fn set_attribute_should_respect_freeze() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![], true)); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!( - Uniques::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0]), - e - ); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - Some(0), - bvec![0], - bvec![1] - )); + assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 0, bvec![], true)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!( - Uniques::set_attribute(RuntimeOrigin::signed(1), 0, 
Some(0), bvec![0], bvec![1]), - e - ); - assert_ok!(Uniques::set_attribute( - RuntimeOrigin::signed(1), - 0, - Some(1), - bvec![0], - bvec![1] - )); + assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); }); } @@ -590,42 +487,32 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 69, 2)); - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0; 20], - false - )); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); // force item status to be free holding - assert_ok!(Uniques::force_item_status(RuntimeOrigin::root(), 0, 1, 1, 1, 1, true, false)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 142, 1)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 169, 2)); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20], false)); + assert_ok!(Uniques::force_item_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); + 
assert_ok!(Uniques::mint(Origin::signed(1), 0, 142, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 169, 2)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); - assert_ok!(Uniques::redeposit(RuntimeOrigin::signed(1), 0, bvec![0, 42, 50, 69, 100])); + assert_ok!(Uniques::redeposit(Origin::signed(1), 0, bvec![0, 42, 50, 69, 100])); assert_eq!(Balances::reserved_balance(1), 63); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 42); - assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 21); - assert_ok!(Uniques::set_collection_metadata( - RuntimeOrigin::signed(1), - 0, - bvec![0; 20], - false - )); + assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -634,29 +521,23 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); - assert_ok!(Uniques::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); + assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_noop!( - Uniques::burn(RuntimeOrigin::signed(5), 0, 42, Some(5)), + Uniques::burn(Origin::signed(5), 0, 42, Some(5)), Error::::UnknownCollection ); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 0, 42, 5)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 0, 69, 5)); + 
assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 5)); + assert_ok!(Uniques::mint(Origin::signed(2), 0, 69, 5)); assert_eq!(Balances::reserved_balance(1), 2); - assert_noop!( - Uniques::burn(RuntimeOrigin::signed(0), 0, 42, None), - Error::::NoPermission - ); - assert_noop!( - Uniques::burn(RuntimeOrigin::signed(5), 0, 42, Some(6)), - Error::::WrongOwner - ); + assert_noop!(Uniques::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); + assert_noop!(Uniques::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); - assert_ok!(Uniques::burn(RuntimeOrigin::signed(5), 0, 42, Some(5))); - assert_ok!(Uniques::burn(RuntimeOrigin::signed(3), 0, 69, Some(5))); + assert_ok!(Uniques::burn(Origin::signed(5), 0, 42, Some(5))); + assert_ok!(Uniques::burn(Origin::signed(3), 0, 69, Some(5))); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -664,92 +545,45 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); - assert_noop!( - Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 3), - Error::::NoPermission - ); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); + assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); assert!(Item::::get(0, 42).unwrap().approved.is_none()); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 2)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 2)); - }); -} - -#[test] -fn approved_account_gets_reset_after_transfer() { - 
new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 5)); - - // this shouldn't work because we have just transfered the item to another account. - assert_noop!( - Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4), - Error::::NoPermission - ); - // The new owner can transfer fine: - assert_ok!(Uniques::transfer(RuntimeOrigin::signed(5), 0, 42, 6)); - }); -} - -#[test] -fn approved_account_gets_reset_after_buy_item() { - new_test_ext().execute_with(|| { - let item = 1; - let price = 15; - - Balances::make_free_balance_be(&2, 100); - - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, item, 1)); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(1), 0, item, 5)); - - assert_ok!(Uniques::set_price(RuntimeOrigin::signed(1), 0, item, Some(price), None)); - - assert_ok!(Uniques::buy_item(RuntimeOrigin::signed(2), 0, item, price)); - - // this shouldn't work because the item has been bough and the approved account should be - // reset. 
- assert_noop!( - Uniques::transfer(RuntimeOrigin::signed(5), 0, item, 4), - Error::::NoPermission - ); + assert_ok!(Uniques::approve_transfer(Origin::signed(4), 0, 42, 2)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 2)); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(2), 1, 42, None), + Uniques::cancel_approval(Origin::signed(2), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 43, None), + Uniques::cancel_approval(Origin::signed(2), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(3), 0, 42, None), + Uniques::cancel_approval(Origin::signed(3), 0, 42, None), Error::::NoPermission ); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 42, Some(4)), + Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 42, Some(3))); + assert_ok!(Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 42, None), + Uniques::cancel_approval(Origin::signed(2), 0, 42, None), Error::::NoDelegate ); }); @@ -758,26 +592,26 @@ fn cancel_approval_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + 
assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(1), 1, 42, None), + Uniques::cancel_approval(Origin::signed(1), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 43, None), + Uniques::cancel_approval(Origin::signed(1), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 42, Some(4)), + Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 42, Some(3))); + assert_ok!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 42, None), + Uniques::cancel_approval(Origin::signed(1), 0, 42, None), Error::::NoDelegate ); }); @@ -786,26 +620,26 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::root(), 1, 42, None), + Uniques::cancel_approval(Origin::root(), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::root(), 0, 43, None), + Uniques::cancel_approval(Origin::root(), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - 
Uniques::cancel_approval(RuntimeOrigin::root(), 0, 42, Some(4)), + Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(RuntimeOrigin::root(), 0, 42, Some(3))); + assert_ok!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(RuntimeOrigin::root(), 0, 42, None), + Uniques::cancel_approval(Origin::root(), 0, 42, None), Error::::NoDelegate ); }); @@ -819,11 +653,11 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), collection_id, user_id, true)); + assert_ok!(Uniques::force_create(Origin::root(), user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Uniques::set_collection_max_supply( - RuntimeOrigin::signed(user_id), + Origin::signed(user_id), collection_id, max_supply )); @@ -836,7 +670,7 @@ fn max_supply_should_work() { assert_noop!( Uniques::set_collection_max_supply( - RuntimeOrigin::signed(user_id), + Origin::signed(user_id), collection_id, max_supply + 1 ), @@ -844,16 +678,16 @@ fn max_supply_should_work() { ); // validate we can't mint more to max supply - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, 0, user_id)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, 1, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 0, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 1, user_id)); assert_noop!( - Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, 2, user_id), + Uniques::mint(Origin::signed(user_id), collection_id, 2, user_id), Error::::MaxSupplyReached ); // validate we remove the CollectionMaxSupply record when we destroy the collection assert_ok!(Uniques::destroy( - RuntimeOrigin::signed(user_id), + Origin::signed(user_id), collection_id, 
Collection::::get(collection_id).unwrap().destroy_witness() )); @@ -861,21 +695,54 @@ fn max_supply_should_work() { }); } +#[test] +fn try_increment_id_works() { + new_test_ext().execute_with(|| { + // should fail because the next `CollectionId` is not being used. + assert_noop!(Uniques::try_increment_id(Origin::signed(2)), Error::::NextIdNotUsed); + + // create two collections & check for events. + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 1 })); + assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 2 })); + + // there are now two collections. + assert_eq!(Uniques::get_next_id(), 2); + + // reset the collections counter to test if the `try_increment_id` + // works. + Uniques::set_next_id(0); + assert_ok!(Uniques::try_increment_id(Origin::signed(2))); + + // `try_increment_id` should emit an event when successful. + assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 1 })); + + // because reset, the collections count should be now 1 + assert_eq!(Uniques::get_next_id(), 1); + + // increment the collections count again. + assert_ok!(Uniques::try_increment_id(Origin::signed(2))); + // should fail because the next `CollectionId` is not being used. 
+ assert_noop!(Uniques::try_increment_id(Origin::signed(2)), Error::::NextIdNotUsed); + }); +} + #[test] fn set_price_should_work() { new_test_ext().execute_with(|| { let user_id = 1; - let collection_id = 0; + let collection_id: u32 = 0; let item_1 = 1; let item_2 = 2; - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), collection_id, user_id, true)); + assert_ok!(Uniques::force_create(Origin::root(), user_id, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, user_id)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); assert_ok!(Uniques::set_price( - RuntimeOrigin::signed(user_id), + Origin::signed(user_id), collection_id, item_1, Some(1.into()), @@ -883,7 +750,7 @@ fn set_price_should_work() { )); assert_ok!(Uniques::set_price( - RuntimeOrigin::signed(user_id), + Origin::signed(user_id), collection_id, item_2, Some(2.into()), @@ -906,13 +773,7 @@ fn set_price_should_work() { })); // validate we can unset the price - assert_ok!(Uniques::set_price( - RuntimeOrigin::signed(user_id), - collection_id, - item_2, - None, - None - )); + assert_ok!(Uniques::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); assert!(events().contains(&Event::::ItemPriceRemoved { collection: collection_id, item: item_2 @@ -927,7 +788,7 @@ fn buy_item_should_work() { let user_1 = 1; let user_2 = 2; let user_3 = 3; - let collection_id = 0; + let collection_id: u32 = 0; let item_1 = 1; let item_2 = 2; let item_3 = 3; @@ -939,14 +800,14 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Uniques::force_create(RuntimeOrigin::root(), collection_id, user_1, true)); + 
assert_ok!(Uniques::force_create(Origin::root(), user_1, true)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_1)); - assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); + assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_3, user_1)); assert_ok!(Uniques::set_price( - RuntimeOrigin::signed(user_1), + Origin::signed(user_1), collection_id, item_1, Some(price_1.into()), @@ -954,7 +815,7 @@ fn buy_item_should_work() { )); assert_ok!(Uniques::set_price( - RuntimeOrigin::signed(user_1), + Origin::signed(user_1), collection_id, item_2, Some(price_2.into()), @@ -963,16 +824,16 @@ fn buy_item_should_work() { // can't buy for less assert_noop!( - Uniques::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_1, 1.into()), + Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, 1.into()), Error::::BidTooLow ); // pass the higher price to validate it will still deduct correctly assert_ok!(Uniques::buy_item( - RuntimeOrigin::signed(user_2), + Origin::signed(user_2), collection_id, item_1, - (price_1 + 1).into(), + (price_1 + 1).into() )); // validate the new owner & balances @@ -983,22 +844,22 @@ fn buy_item_should_work() { // can't buy from yourself assert_noop!( - Uniques::buy_item(RuntimeOrigin::signed(user_1), collection_id, item_2, price_2.into()), + Uniques::buy_item(Origin::signed(user_1), collection_id, item_2, price_2.into()), Error::::NoPermission ); // can't buy when the item is listed for a specific buyer assert_noop!( - Uniques::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_2, price_2.into()), + Uniques::buy_item(Origin::signed(user_2), collection_id, item_2, price_2.into()), 
Error::::NoPermission ); // can buy when I'm a whitelisted buyer assert_ok!(Uniques::buy_item( - RuntimeOrigin::signed(user_3), + Origin::signed(user_3), collection_id, item_2, - price_2.into(), + price_2.into() )); assert!(events().contains(&Event::::ItemBought { @@ -1014,14 +875,14 @@ fn buy_item_should_work() { // can't buy when item is not for sale assert_noop!( - Uniques::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_3, price_2.into()), + Uniques::buy_item(Origin::signed(user_2), collection_id, item_3, price_2.into()), Error::::NotForSale ); // ensure we can't buy an item when the collection or an item is frozen { assert_ok!(Uniques::set_price( - RuntimeOrigin::signed(user_1), + Origin::signed(user_1), collection_id, item_3, Some(price_1.into()), @@ -1029,32 +890,26 @@ fn buy_item_should_work() { )); // freeze collection - assert_ok!(Uniques::freeze_collection(RuntimeOrigin::signed(user_1), collection_id)); + assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id)); - let buy_item_call = mock::RuntimeCall::Uniques(crate::Call::::buy_item { + let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1.into(), }); - assert_noop!( - buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), - Error::::Frozen - ); + assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); - assert_ok!(Uniques::thaw_collection(RuntimeOrigin::signed(user_1), collection_id)); + assert_ok!(Uniques::thaw_collection(Origin::signed(user_1), collection_id)); // freeze item - assert_ok!(Uniques::freeze(RuntimeOrigin::signed(user_1), collection_id, item_3)); + assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, item_3)); - let buy_item_call = mock::RuntimeCall::Uniques(crate::Call::::buy_item { + let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1.into(), }); - assert_noop!( - 
buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), - Error::::Frozen - ); + assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); } }); } diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 8a8e1090bb718..75bb89f26b428 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,24 +18,23 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-07-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_uniques // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet_uniques +// --chain=dev // --output=./frame/uniques/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -71,6 +70,7 @@ pub trait WeightInfo { fn cancel_approval() -> Weight; fn set_accept_ownership() -> Weight; fn set_collection_max_supply() -> Weight; + fn try_increment_id() -> Weight; fn set_price() -> Weight; fn buy_item() -> Weight; } @@ -78,21 +78,21 @@ pub trait WeightInfo { /// Weights for pallet_uniques using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 35_358 nanoseconds. - Weight::from_ref_time(35_935_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (30_481_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - // Minimum execution time: 22_767 nanoseconds. 
- Weight::from_ref_time(23_235_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (19_811_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -106,236 +106,219 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Minimum execution time: 2_453_194 nanoseconds. - Weight::from_ref_time(2_469_109_000 as u64) - // Standard Error: 27_900 - .saturating_add(Weight::from_ref_time(8_974_176 as u64).saturating_mul(n as u64)) - // Standard Error: 27_900 - .saturating_add(Weight::from_ref_time(344_842 as u64).saturating_mul(m as u64)) - // Standard Error: 27_900 - .saturating_add(Weight::from_ref_time(185_438 as u64).saturating_mul(a as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(m as u64))) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) + (0 as Weight) + // Standard Error: 17_000 + .saturating_add((10_950_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 17_000 + .saturating_add((1_657_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 17_000 + .saturating_add((1_512_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as 
Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - // Minimum execution time: 45_115 nanoseconds. - Weight::from_ref_time(45_746_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (36_980_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - // Minimum execution time: 46_447 nanoseconds. - Weight::from_ref_time(46_994_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (38_771_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - // Minimum execution time: 35_953 nanoseconds. - Weight::from_ref_time(36_375_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (29_914_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:102 w:102) + // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - // Minimum execution time: 24_238 nanoseconds. - Weight::from_ref_time(24_788_000 as u64) - // Standard Error: 9_232 - .saturating_add(Weight::from_ref_time(11_322_011 as u64).saturating_mul(i as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 16_000 + .saturating_add((12_759_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - // Minimum execution time: 28_595 nanoseconds. - Weight::from_ref_time(29_280_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (22_425_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - // Minimum execution time: 28_581 nanoseconds. - Weight::from_ref_time(29_038_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (23_011_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - // Minimum execution time: 24_298 nanoseconds. 
- Weight::from_ref_time(24_742_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (17_718_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - // Minimum execution time: 24_004 nanoseconds. - Weight::from_ref_time(24_536_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (17_619_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - // Minimum execution time: 32_599 nanoseconds. - Weight::from_ref_time(33_201_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (25_869_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 25_137 nanoseconds. - Weight::from_ref_time(25_877_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (18_058_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - // Minimum execution time: 27_736 nanoseconds. 
- Weight::from_ref_time(28_279_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (20_720_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - // Minimum execution time: 51_195 nanoseconds. - Weight::from_ref_time(51_674_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (41_808_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - // Minimum execution time: 50_159 nanoseconds. - Weight::from_ref_time(51_412_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (39_866_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - // Minimum execution time: 42_608 nanoseconds. - Weight::from_ref_time(42_880_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (34_767_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 43_239 nanoseconds. 
- Weight::from_ref_time(43_752_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (33_910_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - // Minimum execution time: 41_224 nanoseconds. - Weight::from_ref_time(41_974_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (33_827_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - // Minimum execution time: 40_836 nanoseconds. - Weight::from_ref_time(41_864_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (31_998_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 29_558 nanoseconds. - Weight::from_ref_time(29_948_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (23_607_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 29_694 nanoseconds. 
- Weight::from_ref_time(30_156_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (23_341_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - // Minimum execution time: 27_819 nanoseconds. - Weight::from_ref_time(28_245_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (21_969_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - // Minimum execution time: 26_317 nanoseconds. - Weight::from_ref_time(26_893_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (20_355_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques NextCollectionId (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn try_increment_id() -> Weight { + (19_233_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - // Minimum execution time: 26_546 nanoseconds. 
- Weight::from_ref_time(27_142_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + (20_733_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - // Minimum execution time: 49_238 nanoseconds. - Weight::from_ref_time(50_444_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (40_798_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 35_358 nanoseconds. - Weight::from_ref_time(35_935_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (30_481_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } + // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - // Minimum execution time: 22_767 nanoseconds. 
- Weight::from_ref_time(23_235_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (19_811_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -349,215 +332,198 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Minimum execution time: 2_453_194 nanoseconds. - Weight::from_ref_time(2_469_109_000 as u64) - // Standard Error: 27_900 - .saturating_add(Weight::from_ref_time(8_974_176 as u64).saturating_mul(n as u64)) - // Standard Error: 27_900 - .saturating_add(Weight::from_ref_time(344_842 as u64).saturating_mul(m as u64)) - // Standard Error: 27_900 - .saturating_add(Weight::from_ref_time(185_438 as u64).saturating_mul(a as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(m as u64))) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) + (0 as Weight) + // Standard Error: 17_000 + .saturating_add((10_950_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 17_000 + .saturating_add((1_657_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 17_000 + .saturating_add((1_512_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - // Minimum execution time: 45_115 nanoseconds. - Weight::from_ref_time(45_746_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (36_980_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - // Minimum execution time: 46_447 nanoseconds. - Weight::from_ref_time(46_994_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (38_771_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - // Minimum execution time: 35_953 nanoseconds. 
- Weight::from_ref_time(36_375_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (29_914_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:102 w:102) + // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - // Minimum execution time: 24_238 nanoseconds. - Weight::from_ref_time(24_788_000 as u64) - // Standard Error: 9_232 - .saturating_add(Weight::from_ref_time(11_322_011 as u64).saturating_mul(i as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) + (0 as Weight) + // Standard Error: 16_000 + .saturating_add((12_759_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - // Minimum execution time: 28_595 nanoseconds. - Weight::from_ref_time(29_280_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (22_425_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - // Minimum execution time: 28_581 nanoseconds. 
- Weight::from_ref_time(29_038_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (23_011_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - // Minimum execution time: 24_298 nanoseconds. - Weight::from_ref_time(24_742_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (17_718_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - // Minimum execution time: 24_004 nanoseconds. - Weight::from_ref_time(24_536_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (17_619_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - // Minimum execution time: 32_599 nanoseconds. - Weight::from_ref_time(33_201_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (25_869_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 25_137 nanoseconds. 
- Weight::from_ref_time(25_877_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (18_058_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - // Minimum execution time: 27_736 nanoseconds. - Weight::from_ref_time(28_279_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (20_720_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - // Minimum execution time: 51_195 nanoseconds. - Weight::from_ref_time(51_674_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (41_808_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - // Minimum execution time: 50_159 nanoseconds. - Weight::from_ref_time(51_412_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (39_866_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - // Minimum execution time: 42_608 nanoseconds. 
- Weight::from_ref_time(42_880_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (34_767_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 43_239 nanoseconds. - Weight::from_ref_time(43_752_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (33_910_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - // Minimum execution time: 41_224 nanoseconds. - Weight::from_ref_time(41_974_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (33_827_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - // Minimum execution time: 40_836 nanoseconds. - Weight::from_ref_time(41_864_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (31_998_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 29_558 nanoseconds. 
- Weight::from_ref_time(29_948_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (23_607_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 29_694 nanoseconds. - Weight::from_ref_time(30_156_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (23_341_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - // Minimum execution time: 27_819 nanoseconds. - Weight::from_ref_time(28_245_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (21_969_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - // Minimum execution time: 26_317 nanoseconds. 
- Weight::from_ref_time(26_893_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (20_355_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques NextCollectionId (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn try_increment_id() -> Weight { + (19_233_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - // Minimum execution time: 26_546 nanoseconds. - Weight::from_ref_time(27_142_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + (20_733_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - // Minimum execution time: 49_238 nanoseconds. 
- Weight::from_ref_time(50_444_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (40_798_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } } diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index ac4f52c6bb9f3..7b56d7974e4b5 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -25,14 +25,11 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } -pallet-collective = { version = "4.0.0-dev", path = "../collective" } -pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 07bc14951cb3b..018280f69baeb 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -25,15 +25,15 @@ use frame_system::RawOrigin; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::RuntimeEvent) { +fn assert_last_event(generic_event: ::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } benchmarks! { - where_clause { where ::PalletsOrigin: Clone } + where_clause { where ::PalletsOrigin: Clone } batch { let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); @@ -54,7 +54,7 @@ benchmarks! { batch_all { let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. 
c { let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); @@ -68,14 +68,14 @@ benchmarks! { dispatch_as { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); - let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into(); - let pallets_origin: ::PalletsOrigin = origin.caller().clone(); + let origin: T::Origin = RawOrigin::Signed(caller).into(); + let pallets_origin: ::PalletsOrigin = origin.caller().clone(); let pallets_origin = Into::::into(pallets_origin); }: _(RawOrigin::Root, Box::new(pallets_origin), call) force_batch { let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 41710be930b90..0aae2615702dd 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -58,12 +58,13 @@ pub mod weights; use codec::{Decode, Encode}; use frame_support::{ - dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, + dispatch::PostDispatchInfo, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, + weights::{extract_actual_weight, GetDispatchInfo}, }; use sp_core::TypeId; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; +use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; @@ -83,21 +84,21 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From + IsType<::RuntimeEvent>; + type Event: From + IsType<::Event>; /// The overarching call type. 
- type RuntimeCall: Parameter - + Dispatchable + type Call: Parameter + + Dispatchable + GetDispatchInfo + From> - + UnfilteredDispatchable + + UnfilteredDispatchable + IsSubType> - + IsType<::RuntimeCall>; + + IsType<::Call>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + - Into<::RuntimeOrigin> + - IsType<<::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; + Into<::Origin> + + IsType<<::Origin as frame_support::traits::OriginTrait>::PalletsOrigin>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -124,7 +125,7 @@ pub mod pallet { // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm // the `size_of` of the `Call` can be different. To ensure that this don't leads to // mismatches between native/wasm or to different metadata for the same runtime, we - // algin the call size. The value is chosen big enough to hopefully never reach it. + // algin the call size. The value is choosen big enough to hopefully never reach it. const CALL_ALIGN: u32 = 1024; #[pallet::extra_constants] @@ -132,9 +133,8 @@ pub mod pallet { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 + - CALL_ALIGN - 1) / CALL_ALIGN) * - CALL_ALIGN; + let call_size = ((sp_std::mem::size_of::<::Call>() as u32 + CALL_ALIGN - + 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -147,7 +147,7 @@ pub mod pallet { fn integrity_test() { // If you hit this error, you need to try to `Box` big dispatchable parameters. 
assert!( - sp_std::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, + sp_std::mem::size_of::<::Call>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); @@ -164,13 +164,13 @@ pub mod pallet { impl Pallet { /// Send a batch of dispatch calls. /// - /// May be called from any origin except `None`. + /// May be called from any origin. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// - /// If origin is root then the calls are dispatched without checking origin filter. (This - /// includes bypassing `frame_system::Config::BaseCallFilter`). + /// If origin is root then call are dispatch without checking origin filter. (This includes + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -185,7 +185,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -201,19 +201,14 @@ pub mod pallet { })] pub fn batch( origin: OriginFor, - calls: Vec<::RuntimeCall>, + calls: Vec<::Call>, ) -> DispatchResultWithPostInfo { - // Do not allow the `None` origin. - if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()) - } - let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. 
- let mut weight = Weight::zero(); + let mut weight: Weight = 0; for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. @@ -258,16 +253,16 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() + .saturating_add(dispatch_info.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(dispatch_info.weight), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, ) })] pub fn as_derivative( origin: OriginFor, index: u16, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; @@ -291,13 +286,13 @@ pub mod pallet { /// Send a batch of dispatch calls and atomically execute them. /// The whole transaction will rollback and fail if any of the calls failed. /// - /// May be called from any origin except `None`. + /// May be called from any origin. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// - /// If origin is root then the calls are dispatched without checking origin filter. (This - /// includes bypassing `frame_system::Config::BaseCallFilter`). + /// If origin is root then call are dispatch without checking origin filter. (This includes + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. 
@@ -306,7 +301,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -322,19 +317,14 @@ pub mod pallet { })] pub fn batch_all( origin: OriginFor, - calls: Vec<::RuntimeCall>, + calls: Vec<::Call>, ) -> DispatchResultWithPostInfo { - // Do not allow the `None` origin. - if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()) - } - let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight = Weight::zero(); + let mut weight: Weight = 0; for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. @@ -343,12 +333,10 @@ pub mod pallet { } else { let mut filtered_origin = origin.clone(); // Don't allow users to nest `batch_all` calls. - filtered_origin.add_filter( - move |c: &::RuntimeCall| { - let c = ::RuntimeCall::from_ref(c); - !matches!(c.is_sub_type(), Some(Call::batch_all { .. })) - }, - ); + filtered_origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); + !matches!(c.is_sub_type(), Some(Call::batch_all { .. })) + }); call.dispatch(filtered_origin) }; // Add the weight of this call. 
@@ -364,7 +352,7 @@ pub mod pallet { } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); - Ok(Some(base_weight.saturating_add(weight)).into()) + Ok(Some(base_weight + weight).into()) } /// Dispatches a function call with a provided origin. @@ -388,7 +376,7 @@ pub mod pallet { pub fn dispatch_as( origin: OriginFor, as_origin: Box, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResult { ensure_root(origin)?; @@ -403,13 +391,13 @@ pub mod pallet { /// Send a batch of dispatch calls. /// Unlike `batch`, it allows errors and won't interrupt. /// - /// May be called from any origin except `None`. + /// May be called from any origin. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// - /// If origin is root then the calls are dispatch without checking origin filter. (This - /// includes bypassing `frame_system::Config::BaseCallFilter`). + /// If origin is root then call are dispatch without checking origin filter. (This includes + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -418,7 +406,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -434,19 +422,14 @@ pub mod pallet { })] pub fn force_batch( origin: OriginFor, - calls: Vec<::RuntimeCall>, + calls: Vec<::Call>, ) -> DispatchResultWithPostInfo { - // Do not allow the `None` origin. 
- if ensure_none(origin.clone()).is_ok() { - return Err(BadOrigin.into()) - } - let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight = Weight::zero(); + let mut weight: Weight = 0; // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { @@ -472,7 +455,7 @@ pub mod pallet { Self::deposit_event(Event::BatchCompleted); } let base_weight = T::WeightInfo::batch(calls_len as u32); - Ok(Some(base_weight.saturating_add(weight)).into()) + Ok(Some(base_weight + weight).into()) } } } diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index c374f5ae21099..6368473ac8708 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -24,21 +24,17 @@ use super::*; use crate as utility; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, - dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable, Pays}, - error::BadOrigin, + dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable}, parameter_types, storage, - traits::{ConstU32, ConstU64, Contains, GenesisBuild}, - weights::Weight, + traits::{ConstU32, ConstU64, Contains}, + weights::{Pays, Weight}, }; -use pallet_collective::{EnsureProportionAtLeast, Instance1}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{BlakeTwo256, Hash, IdentityLookup}, + traits::{BlakeTwo256, IdentityLookup}, }; -type BlockNumber = u64; - // example module to test behaviors. 
#[frame_support::pallet] pub mod example { @@ -85,42 +81,6 @@ pub mod example { } } -mod mock_democracy { - pub use pallet::*; - #[frame_support::pallet] - pub mod pallet { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config + Sized { - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; - type ExternalMajorityOrigin: EnsureOrigin; - } - - #[pallet::call] - impl Pallet { - #[pallet::weight(0)] - pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { - T::ExternalMajorityOrigin::ensure_origin(origin)?; - Self::deposit_event(Event::::ExternalProposed); - Ok(()) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - ExternalProposed, - } - } -} - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -131,34 +91,31 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Call, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Council: pallet_collective::, Utility: utility::{Pallet, Call, Event}, Example: example::{Pallet, Call}, - Democracy: mock_democracy::{Pallet, Call, Event}, } ); parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::MAX); + frame_system::limits::BlockWeights::simple_max(Weight::max_value()); } impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -177,67 +134,37 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } - -impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = ConstU64<3>; - type WeightInfo = (); -} - -const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; parameter_types! 
{ pub const MultisigDepositBase: u64 = 1; pub const MultisigDepositFactor: u64 = 1; - pub const MaxSignatories: u32 = 3; - pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; - pub const MaxProposals: u32 = 100; - pub const MaxMembers: u32 = 100; -} - -type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type MotionDuration = MotionDuration; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = (); + pub const MaxSignatories: u16 = 3; } impl example::Config for Test {} pub struct TestBaseCallFilter; -impl Contains for TestBaseCallFilter { - fn contains(c: &RuntimeCall) -> bool { +impl Contains for TestBaseCallFilter { + fn contains(c: &Call) -> bool { match *c { // Transfer works. Use `transfer_keep_alive` for a call that doesn't pass the filter. - RuntimeCall::Balances(pallet_balances::Call::transfer { .. }) => true, - RuntimeCall::Utility(_) => true, + Call::Balances(pallet_balances::Call::transfer { .. }) => true, + Call::Utility(_) => true, // For benchmarking, this acts as a noop call - RuntimeCall::System(frame_system::Call::remark { .. }) => true, + Call::System(frame_system::Call::remark { .. }) => true, // For tests - RuntimeCall::Example(_) => true, - // For council origin tests. 
- RuntimeCall::Democracy(_) => true, + Call::Example(_) => true, _ => false, } } } -impl mock_democracy::Config for Test { - type RuntimeEvent = RuntimeEvent; - type ExternalMajorityOrigin = EnsureProportionAtLeast; -} impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type PalletsOrigin = OriginCaller; type WeightInfo = (); } @@ -247,7 +174,6 @@ type UtilityCall = crate::Call; use frame_system::Call as SystemCall; use pallet_balances::{Call as BalancesCall, Error as BalancesError}; -use pallet_timestamp::Call as TimestampCall; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -256,41 +182,29 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } .assimilate_storage(&mut t) .unwrap(); - - pallet_collective::GenesisConfig:: { - members: vec![1, 2, 3], - phantom: Default::default(), - } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } -fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) } -fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> RuntimeCall { - RuntimeCall::Example(ExampleCall::foobar { err, start_weight, end_weight }) +fn call_foobar(err: bool, start_weight: u64, end_weight: Option) -> Call { + Call::Example(ExampleCall::foobar { err, start_weight, end_weight }) } #[test] fn as_derivative_works() { new_test_ext().execute_with(|| { let sub_1_0 = Utility::derivative_account_id(1, 0); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), sub_1_0, 5)); + assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); assert_err_ignore_postinfo!( - Utility::as_derivative(RuntimeOrigin::signed(1), 1, 
Box::new(call_transfer(6, 3)),), + Utility::as_derivative(Origin::signed(1), 1, Box::new(call_transfer(6, 3)),), BalancesError::::InsufficientBalance ); - assert_ok!(Utility::as_derivative( - RuntimeOrigin::signed(1), - 0, - Box::new(call_transfer(2, 3)), - )); + assert_ok!(Utility::as_derivative(Origin::signed(1), 0, Box::new(call_transfer(2, 3)),)); assert_eq!(Balances::free_balance(sub_1_0), 2); assert_eq!(Balances::free_balance(2), 13); }); @@ -299,41 +213,35 @@ fn as_derivative_works() { #[test] fn as_derivative_handles_weight_refund() { new_test_ext().execute_with(|| { - let start_weight = Weight::from_ref_time(100); - let end_weight = Weight::from_ref_time(75); + let start_weight = 100; + let end_weight = 75; let diff = start_weight - end_weight; // Full weight when ok let inner_call = call_foobar(false, start_weight, None); - let call = RuntimeCall::Utility(UtilityCall::as_derivative { - index: 0, - call: Box::new(inner_call), - }); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), info.weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); - let call = RuntimeCall::Utility(UtilityCall::as_derivative { - index: 0, - call: Box::new(inner_call), - }); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); // Diff is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight - diff); // Full weight when err let inner_call = call_foobar(true, start_weight, None); - let call = RuntimeCall::Utility(UtilityCall::as_derivative { - index: 0, - call: 
Box::new(inner_call), - }); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_noop!( result, DispatchErrorWithPostInfo { @@ -348,12 +256,10 @@ fn as_derivative_handles_weight_refund() { // Refund weight when err let inner_call = call_foobar(true, start_weight, Some(end_weight)); - let call = RuntimeCall::Utility(UtilityCall::as_derivative { - index: 0, - call: Box::new(inner_call), - }); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_noop!( result, DispatchErrorWithPostInfo { @@ -373,9 +279,9 @@ fn as_derivative_filters() { new_test_ext().execute_with(|| { assert_err_ignore_postinfo!( Utility::as_derivative( - RuntimeOrigin::signed(1), + Origin::signed(1), 1, - Box::new(RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { + Box::new(Call::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })), @@ -389,25 +295,16 @@ fn as_derivative_filters() { fn batch_with_root_works() { new_test_ext().execute_with(|| { let k = b"a".to_vec(); - let call = RuntimeCall::System(frame_system::Call::set_storage { - items: vec![(k.clone(), k.clone())], - }); + let call = + Call::System(frame_system::Call::set_storage { items: vec![(k.clone(), k.clone())] }); assert!(!TestBaseCallFilter::contains(&call)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch( - RuntimeOrigin::root(), + Origin::root(), vec![ - RuntimeCall::Balances(BalancesCall::force_transfer { - source: 1, - dest: 2, - value: 5 - }), - RuntimeCall::Balances(BalancesCall::force_transfer { - source: 1, - dest: 2, - value: 5 - }), + 
Call::Balances(BalancesCall::force_transfer { source: 1, dest: 2, value: 5 }), + Call::Balances(BalancesCall::force_transfer { source: 1, dest: 2, value: 5 }), call, // Check filters are correctly bypassed ] )); @@ -423,7 +320,7 @@ fn batch_with_signed_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch( - RuntimeOrigin::signed(1), + Origin::signed(1), vec![call_transfer(2, 5), call_transfer(2, 5)] ),); assert_eq!(Balances::free_balance(1), 0); @@ -435,11 +332,8 @@ fn batch_with_signed_works() { fn batch_with_signed_filters() { new_test_ext().execute_with(|| { assert_ok!(Utility::batch( - RuntimeOrigin::signed(1), - vec![RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { - dest: 2, - value: 1 - })] + Origin::signed(1), + vec![Call::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })] ),); System::assert_last_event( utility::Event::BatchInterrupted { @@ -457,7 +351,7 @@ fn batch_early_exit_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch( - RuntimeOrigin::signed(1), + Origin::signed(1), vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5),] ),); assert_eq!(Balances::free_balance(1), 5); @@ -469,42 +363,41 @@ fn batch_early_exit_works() { fn batch_weight_calculation_doesnt_overflow() { use sp_runtime::Perbill; new_test_ext().execute_with(|| { - let big_call = - RuntimeCall::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); - assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); + let big_call = Call::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); + assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); // 3 * 50% saturates to 100% - let batch_call = RuntimeCall::Utility(crate::Call::batch { + let batch_call = Call::Utility(crate::Call::batch { calls: vec![big_call.clone(), big_call.clone(), 
big_call.clone()], }); - assert_eq!(batch_call.get_dispatch_info().weight, Weight::MAX); + assert_eq!(batch_call.get_dispatch_info().weight, Weight::max_value()); }); } #[test] fn batch_handles_weight_refund() { new_test_ext().execute_with(|| { - let start_weight = Weight::from_ref_time(100); - let end_weight = Weight::from_ref_time(75); + let start_weight = 100; + let end_weight = 75; let diff = start_weight - end_weight; - let batch_len = 4; + let batch_len: Weight = 4; // Full weight when ok let inner_call = call_foobar(false, start_weight, None); let batch_calls = vec![inner_call; batch_len as usize]; - let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), info.weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); let batch_calls = vec![inner_call; batch_len as usize]; - let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); // Diff is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -513,9 +406,9 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, None); let bad_call = call_foobar(true, start_weight, None); let batch_calls = vec![good_call, bad_call]; - let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = 
call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), @@ -527,10 +420,10 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; - let batch_len = batch_calls.len() as u64; - let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let batch_len = batch_calls.len() as Weight; + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), @@ -541,9 +434,9 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call.clone(), bad_call]; - let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), @@ -562,7 +455,7 @@ fn batch_all_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch_all( - RuntimeOrigin::signed(1), + Origin::signed(1), vec![call_transfer(2, 5), call_transfer(2, 5)] ),); assert_eq!(Balances::free_balance(1), 0); @@ -578,11 +471,11 @@ fn batch_all_revert() 
{ assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - let batch_all_calls = RuntimeCall::Utility(crate::Call::::batch_all { + let batch_all_calls = Call::Utility(crate::Call::::batch_all { calls: vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5)], }); assert_noop!( - batch_all_calls.dispatch(RuntimeOrigin::signed(1)), + batch_all_calls.dispatch(Origin::signed(1)), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { actual_weight: Some( @@ -601,26 +494,26 @@ fn batch_all_revert() { #[test] fn batch_all_handles_weight_refund() { new_test_ext().execute_with(|| { - let start_weight = Weight::from_ref_time(100); - let end_weight = Weight::from_ref_time(75); + let start_weight = 100; + let end_weight = 75; let diff = start_weight - end_weight; - let batch_len = 4; + let batch_len: Weight = 4; // Full weight when ok let inner_call = call_foobar(false, start_weight, None); let batch_calls = vec![inner_call; batch_len as usize]; - let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), info.weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); let batch_calls = vec![inner_call; batch_len as usize]; - let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_ok!(result); // Diff is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -629,9 +522,9 @@ fn 
batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, None); let bad_call = call_foobar(true, start_weight, None); let batch_calls = vec![good_call, bad_call]; - let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); // No weight is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -640,10 +533,10 @@ fn batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; - let batch_len = batch_calls.len() as u64; - let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let batch_len = batch_calls.len() as Weight; + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -651,9 +544,9 @@ fn batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call.clone(), bad_call]; - let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(RuntimeOrigin::signed(1)); + let result = call.dispatch(Origin::signed(1)); 
assert_err_ignore_postinfo!(result, "The cake is a lie."); assert_eq!( extract_actual_weight(&result, &info), @@ -666,7 +559,7 @@ fn batch_all_handles_weight_refund() { #[test] fn batch_all_does_not_nest() { new_test_ext().execute_with(|| { - let batch_all = RuntimeCall::Utility(UtilityCall::batch_all { + let batch_all = Call::Utility(UtilityCall::batch_all { calls: vec![call_transfer(2, 1), call_transfer(2, 1), call_transfer(2, 1)], }); @@ -676,7 +569,7 @@ fn batch_all_does_not_nest() { assert_eq!(Balances::free_balance(2), 10); // A nested batch_all call will not pass the filter, and fail with `BadOrigin`. assert_noop!( - Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_all.clone()]), + Utility::batch_all(Origin::signed(1), vec![batch_all.clone()]), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), @@ -689,10 +582,10 @@ fn batch_all_does_not_nest() { // And for those who want to get a little fancy, we check that the filter persists across // other kinds of dispatch wrapping functions... in this case // `batch_all(batch(batch_all(..)))` - let batch_nested = RuntimeCall::Utility(UtilityCall::batch { calls: vec![batch_all] }); + let batch_nested = Call::Utility(UtilityCall::batch { calls: vec![batch_all] }); // Batch will end with `Ok`, but does not actually execute as we can see from the event // and balances. 
- assert_ok!(Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_nested])); + assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); System::assert_has_event( utility::Event::BatchInterrupted { index: 0, @@ -708,15 +601,9 @@ fn batch_all_does_not_nest() { #[test] fn batch_limit() { new_test_ext().execute_with(|| { - let calls = vec![RuntimeCall::System(SystemCall::remark { remark: vec![] }); 40_000]; - assert_noop!( - Utility::batch(RuntimeOrigin::signed(1), calls.clone()), - Error::::TooManyCalls - ); - assert_noop!( - Utility::batch_all(RuntimeOrigin::signed(1), calls), - Error::::TooManyCalls - ); + let calls = vec![Call::System(SystemCall::remark { remark: vec![] }); 40_000]; + assert_noop!(Utility::batch(Origin::signed(1), calls.clone()), Error::::TooManyCalls); + assert_noop!(Utility::batch_all(Origin::signed(1), calls), Error::::TooManyCalls); }); } @@ -726,14 +613,14 @@ fn force_batch_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::force_batch( - RuntimeOrigin::signed(1), + Origin::signed(1), vec![ call_transfer(2, 5), - call_foobar(true, Weight::from_ref_time(75), None), + call_foobar(true, 75, None), call_transfer(2, 10), call_transfer(2, 5), ] - )); + ),); System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); System::assert_has_event( utility::Event::ItemFailed { error: DispatchError::Other("") }.into(), @@ -742,157 +629,12 @@ fn force_batch_works() { assert_eq!(Balances::free_balance(2), 20); assert_ok!(Utility::force_batch( - RuntimeOrigin::signed(2), + Origin::signed(2), vec![call_transfer(1, 5), call_transfer(1, 5),] - )); + ),); System::assert_last_event(utility::Event::BatchCompleted.into()); - assert_ok!(Utility::force_batch(RuntimeOrigin::signed(1), vec![call_transfer(2, 50),]),); + assert_ok!(Utility::force_batch(Origin::signed(1), vec![call_transfer(2, 50),]),); System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); 
}); } - -#[test] -fn none_origin_does_not_work() { - new_test_ext().execute_with(|| { - assert_noop!(Utility::force_batch(RuntimeOrigin::none(), vec![]), BadOrigin); - assert_noop!(Utility::batch(RuntimeOrigin::none(), vec![]), BadOrigin); - assert_noop!(Utility::batch_all(RuntimeOrigin::none(), vec![]), BadOrigin); - }) -} - -#[test] -fn batch_doesnt_work_with_inherents() { - new_test_ext().execute_with(|| { - // fails because inherents expect the origin to be none. - assert_ok!(Utility::batch( - RuntimeOrigin::signed(1), - vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 }),] - )); - System::assert_last_event( - utility::Event::BatchInterrupted { - index: 0, - error: frame_system::Error::::CallFiltered.into(), - } - .into(), - ); - }) -} - -#[test] -fn force_batch_doesnt_work_with_inherents() { - new_test_ext().execute_with(|| { - // fails because inherents expect the origin to be none. - assert_ok!(Utility::force_batch( - RuntimeOrigin::root(), - vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 }),] - )); - System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); - }) -} - -#[test] -fn batch_all_doesnt_work_with_inherents() { - new_test_ext().execute_with(|| { - let batch_all = RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 })], - }); - let info = batch_all.get_dispatch_info(); - - // fails because inherents expect the origin to be none. 
- assert_noop!( - batch_all.dispatch(RuntimeOrigin::signed(1)), - DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { - actual_weight: Some(info.weight), - pays_fee: Pays::Yes - }, - error: frame_system::Error::::CallFiltered.into(), - } - ); - }) -} - -#[test] -fn batch_works_with_council_origin() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Utility(UtilityCall::batch { - calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], - }); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Council::propose( - RuntimeOrigin::signed(1), - 3, - Box::new(proposal.clone()), - proposal_len - )); - - assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); - - System::set_block_number(4); - assert_ok!(Council::close( - RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight, - proposal_len - )); - - System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { - proposal_hash: hash, - result: Ok(()), - })); - }) -} - -#[test] -fn force_batch_works_with_council_origin() { - new_test_ext().execute_with(|| { - let proposal = RuntimeCall::Utility(UtilityCall::force_batch { - calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], - }); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Council::propose( - RuntimeOrigin::signed(1), - 3, - Box::new(proposal.clone()), - proposal_len - )); - - assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); - assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); - 
assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); - - System::set_block_number(4); - assert_ok!(Council::close( - RuntimeOrigin::signed(4), - hash, - 0, - proposal_weight, - proposal_len - )); - - System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { - proposal_hash: hash, - result: Ok(()), - })); - }) -} - -#[test] -fn batch_all_works_with_council_origin() { - new_test_ext().execute_with(|| { - assert_ok!(Utility::batch_all( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(3, 3)), - vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})] - )); - }) -} diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index eac94e44b8dbf..3660a54fb6a8f 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_utility //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// target/production/substrate // benchmark // pallet // --chain=dev @@ -35,7 +35,6 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/utility/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -59,32 +58,27 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - // Minimum execution time: 14_470 nanoseconds. - Weight::from_ref_time(17_443_346 as u64) - // Standard Error: 2_037 - .saturating_add(Weight::from_ref_time(3_510_555 as u64).saturating_mul(c as u64)) + (23_113_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_701_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - // Minimum execution time: 6_799 nanoseconds. - Weight::from_ref_time(6_976_000 as u64) + (4_182_000 as Weight) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - // Minimum execution time: 14_630 nanoseconds. - Weight::from_ref_time(24_580_656 as u64) - // Standard Error: 2_202 - .saturating_add(Weight::from_ref_time(3_584_516 as u64).saturating_mul(c as u64)) + (18_682_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_794_000 as Weight).saturating_mul(c as Weight)) } fn dispatch_as() -> Weight { - // Minimum execution time: 16_597 nanoseconds. - Weight::from_ref_time(16_950_000 as u64) + (12_049_000 as Weight) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { - // Minimum execution time: 13_885 nanoseconds. 
- Weight::from_ref_time(20_147_978 as u64) - // Standard Error: 2_232 - .saturating_add(Weight::from_ref_time(3_516_969 as u64).saturating_mul(c as u64)) + (19_136_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_697_000 as Weight).saturating_mul(c as Weight)) } } @@ -92,31 +86,26 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - // Minimum execution time: 14_470 nanoseconds. - Weight::from_ref_time(17_443_346 as u64) - // Standard Error: 2_037 - .saturating_add(Weight::from_ref_time(3_510_555 as u64).saturating_mul(c as u64)) + (23_113_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_701_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - // Minimum execution time: 6_799 nanoseconds. - Weight::from_ref_time(6_976_000 as u64) + (4_182_000 as Weight) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - // Minimum execution time: 14_630 nanoseconds. - Weight::from_ref_time(24_580_656 as u64) - // Standard Error: 2_202 - .saturating_add(Weight::from_ref_time(3_584_516 as u64).saturating_mul(c as u64)) + (18_682_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_794_000 as Weight).saturating_mul(c as Weight)) } fn dispatch_as() -> Weight { - // Minimum execution time: 16_597 nanoseconds. - Weight::from_ref_time(16_950_000 as u64) + (12_049_000 as Weight) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { - // Minimum execution time: 13_885 nanoseconds. 
- Weight::from_ref_time(20_147_978 as u64) - // Standard Error: 2_232 - .saturating_add(Weight::from_ref_time(3_516_969 as u64).saturating_mul(c as u64)) + (19_136_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_697_000 as Weight).saturating_mul(c as Weight)) } } diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 6a64b474d1485..eb902c0633331 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -32,7 +32,6 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/vesting/README.md b/frame/vesting/README.md index b19a60c5b6824..c3800eb994d4d 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -7,8 +7,7 @@ A simple module providing a means of placing a linear curve on an account's locked balance. This module ensures that there is a lock in place preventing the balance to drop below the *unvested* -amount for reason other than the ones specified in `UnvestedFundsAllowedWithdrawReasons` -configuration value. +amount for any reason other than transaction fee payment. As the amount vested increases over time, the amount unvested reduces. 
However, locks remain in place and explicit action is needed on behalf of the user to ensure that the amount locked is diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index dde5fe3ac7561..2b8150e995240 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -42,7 +42,7 @@ fn add_locks(who: &T::AccountId, n: u8) { } fn add_vesting_schedules( - target: AccountIdLookupOf, + target: ::Source, n: u32, ) -> Result, &'static str> { let min_transfer = T::MinVestedTransfer::get(); @@ -52,7 +52,7 @@ fn add_vesting_schedules( let starting_block = 1u32; let source: T::AccountId = account("source", 0, SEED); - let source_lookup = T::Lookup::unlookup(source.clone()); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); System::::set_block_number(T::BlockNumber::zero()); @@ -81,7 +81,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); @@ -109,7 +109,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); @@ -137,7 +137,7 @@ benchmarks! { let s in 1 .. 
T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); - let other_lookup = T::Lookup::unlookup(other.clone()); + let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); add_locks::(&other, l as u8); let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; @@ -166,7 +166,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); - let other_lookup = T::Lookup::unlookup(other.clone()); + let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); add_locks::(&other, l as u8); add_vesting_schedules::(other_lookup.clone(), s)?; @@ -198,7 +198,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let target: T::AccountId = account("target", 0, SEED); - let target_lookup = T::Lookup::unlookup(target.clone()); + let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); // Add one vesting schedules. @@ -232,11 +232,11 @@ benchmarks! { let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; let source: T::AccountId = account("source", 0, SEED); - let source_lookup = T::Lookup::unlookup(source.clone()); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); let target: T::AccountId = account("target", 0, SEED); - let target_lookup = T::Lookup::unlookup(target.clone()); + let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); // Add one less than max vesting schedules @@ -270,7 +270,7 @@ benchmarks! { let s in 2 .. T::MAX_VESTING_SCHEDULES; let caller: T::AccountId = account("caller", 0, SEED); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); // Give target existing locks. add_locks::(&caller, l as u8); // Add max vesting schedules. 
@@ -320,7 +320,7 @@ benchmarks! { let test_dest: T::AccountId = account("test_dest", 0, SEED); let caller: T::AccountId = account("caller", 0, SEED); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); // Give target other locks. add_locks::(&caller, l as u8); // Add max vesting schedules. diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index a92f94baf6cf9..9fb7eb8037916 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -24,8 +24,7 @@ //! //! A simple pallet providing a means of placing a linear curve on an account's locked balance. This //! pallet ensures that there is a lock in place preventing the balance to drop below the *unvested* -//! amount for any reason other than the ones specified in `UnvestedFundsAllowedWithdrawReasons` -//! configuration value. +//! amount for any reason other than transaction fee payment. //! //! As the amount vested increases over time, the amount unvested reduces. However, locks remain in //! place and explicit action is needed on behalf of the user to ensure that the amount locked is @@ -85,7 +84,6 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; const VESTING_ID: LockIdentifier = *b"vesting "; @@ -156,7 +154,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The currency trait. type Currency: LockableCurrency; @@ -171,10 +169,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - /// Reasons that determine under which conditions the balance may drop below - /// the unvested amount. 
- type UnvestedFundsAllowedWithdrawReasons: Get; - /// Maximum number of vesting schedules an account may have at a given moment. const MAX_VESTING_SCHEDULES: u32; } @@ -254,9 +248,7 @@ pub mod pallet { Vesting::::try_append(who, vesting_info) .expect("Too many vesting schedules at genesis."); - let reasons = - WithdrawReasons::except(T::UnvestedFundsAllowedWithdrawReasons::get()); - + let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, who, locked, reasons); } } @@ -329,7 +321,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) )] - pub fn vest_other(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { + pub fn vest_other( + origin: OriginFor, + target: ::Source, + ) -> DispatchResult { ensure_signed(origin)?; let who = T::Lookup::lookup(target)?; Self::do_vest(who) @@ -357,7 +352,7 @@ pub mod pallet { )] pub fn vested_transfer( origin: OriginFor, - target: AccountIdLookupOf, + target: ::Source, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { let transactor = ensure_signed(origin)?; @@ -388,8 +383,8 @@ pub mod pallet { )] pub fn force_vested_transfer( origin: OriginFor, - source: AccountIdLookupOf, - target: AccountIdLookupOf, + source: ::Source, + target: ::Source, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { ensure_root(origin)?; @@ -499,8 +494,8 @@ impl Pallet { // Execute a vested transfer from `source` to `target` with the given `schedule`. fn do_vested_transfer( - source: AccountIdLookupOf, - target: AccountIdLookupOf, + source: ::Source, + target: ::Source, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { // Validate user inputs. 
@@ -576,7 +571,7 @@ impl Pallet { T::Currency::remove_lock(VESTING_ID, who); Self::deposit_event(Event::::VestingCompleted { account: who.clone() }); } else { - let reasons = WithdrawReasons::except(T::UnvestedFundsAllowedWithdrawReasons::get()); + let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, who, total_locked_now, reasons); Self::deposit_event(Event::::VestingUpdated { account: who.clone(), diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index 0bd371a0353f1..9ad8e57500e89 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -17,7 +17,7 @@ use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, WithdrawReasons}, + traits::{ConstU32, ConstU64, GenesisBuild}, }; use sp_core::H256; use sp_runtime::{ @@ -45,7 +45,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; @@ -55,9 +55,9 @@ impl frame_system::Config for Test { type BlockLength = (); type BlockNumber = u64; type BlockWeights = (); - type RuntimeCall = RuntimeCall; + type Call = Call; type DbWeight = (); - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Hash = H256; type Hashing = BlakeTwo256; type Header = Header; @@ -67,7 +67,7 @@ impl frame_system::Config for Test { type OnNewAccount = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type PalletInfo = PalletInfo; type SS58Prefix = (); type SystemWeightInfo = (); @@ -78,7 +78,7 @@ impl pallet_balances::Config for Test { type AccountStore = System; type Balance = u64; type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + 
type Event = Event; type ExistentialDeposit = ExistentialDeposit; type MaxLocks = ConstU32<10>; type MaxReserves = (); @@ -87,18 +87,15 @@ impl pallet_balances::Config for Test { } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); pub static ExistentialDeposit: u64 = 0; } impl Config for Test { type BlockNumberToBalance = Identity; type Currency = Balances; - type RuntimeEvent = RuntimeEvent; + type Event = Event; const MAX_VESTING_SCHEDULES: u32 = 3; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; } pub struct ExtBuilder { diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 5462445414719..4596157e63b7b 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_vesting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/vesting/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/vesting/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,121 +59,97 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 45_113 nanoseconds. - Weight::from_ref_time(44_114_539 as u64) - // Standard Error: 958 - .saturating_add(Weight::from_ref_time(56_239 as u64).saturating_mul(l as u64)) - // Standard Error: 1_704 - .saturating_add(Weight::from_ref_time(64_926 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (32_978_000 as Weight) + // Standard Error: 1_000 + .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((88_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 43_918 nanoseconds. 
- Weight::from_ref_time(43_452_573 as u64) - // Standard Error: 984 - .saturating_add(Weight::from_ref_time(50_162 as u64).saturating_mul(l as u64)) - // Standard Error: 1_752 - .saturating_add(Weight::from_ref_time(42_080 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (32_856_000 as Weight) + // Standard Error: 1_000 + .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 43_603 nanoseconds. - Weight::from_ref_time(42_696_097 as u64) - // Standard Error: 996 - .saturating_add(Weight::from_ref_time(65_316 as u64).saturating_mul(l as u64)) - // Standard Error: 1_772 - .saturating_add(Weight::from_ref_time(65_862 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (33_522_000 as Weight) + // Standard Error: 1_000 + .saturating_add((74_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. 
fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 43_099 nanoseconds. - Weight::from_ref_time(42_937_914 as u64) - // Standard Error: 884 - .saturating_add(Weight::from_ref_time(52_079 as u64).saturating_mul(l as u64)) - // Standard Error: 1_573 - .saturating_add(Weight::from_ref_time(36_274 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (32_558_000 as Weight) + // Standard Error: 1_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 59_023 nanoseconds. - Weight::from_ref_time(59_606_862 as u64) - // Standard Error: 2_078 - .saturating_add(Weight::from_ref_time(55_335 as u64).saturating_mul(l as u64)) - // Standard Error: 3_698 - .saturating_add(Weight::from_ref_time(26_743 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (49_260_000 as Weight) + // Standard Error: 1_000 + .saturating_add((80_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. 
- /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 58_249 nanoseconds. - Weight::from_ref_time(59_025_976 as u64) - // Standard Error: 2_078 - .saturating_add(Weight::from_ref_time(55_736 as u64).saturating_mul(l as u64)) - // Standard Error: 3_697 - .saturating_add(Weight::from_ref_time(24_903 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + (49_166_000 as Weight) + // Standard Error: 2_000 + .saturating_add((77_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 4_000 + .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 45_279 nanoseconds. 
- Weight::from_ref_time(44_197_440 as u64) - // Standard Error: 946 - .saturating_add(Weight::from_ref_time(62_308 as u64).saturating_mul(l as u64)) - // Standard Error: 1_747 - .saturating_add(Weight::from_ref_time(64_473 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (34_042_000 as Weight) + // Standard Error: 1_000 + .saturating_add((83_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((80_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 44_925 nanoseconds. - Weight::from_ref_time(44_219_676 as u64) - // Standard Error: 889 - .saturating_add(Weight::from_ref_time(60_311 as u64).saturating_mul(l as u64)) - // Standard Error: 1_641 - .saturating_add(Weight::from_ref_time(63_095 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (33_937_000 as Weight) + // Standard Error: 1_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((73_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } } @@ -184,120 +157,96 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. 
fn vest_locked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 45_113 nanoseconds. - Weight::from_ref_time(44_114_539 as u64) - // Standard Error: 958 - .saturating_add(Weight::from_ref_time(56_239 as u64).saturating_mul(l as u64)) - // Standard Error: 1_704 - .saturating_add(Weight::from_ref_time(64_926 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (32_978_000 as Weight) + // Standard Error: 1_000 + .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((88_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 43_918 nanoseconds. - Weight::from_ref_time(43_452_573 as u64) - // Standard Error: 984 - .saturating_add(Weight::from_ref_time(50_162 as u64).saturating_mul(l as u64)) - // Standard Error: 1_752 - .saturating_add(Weight::from_ref_time(42_080 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (32_856_000 as Weight) + // Standard Error: 1_000 + .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. 
fn vest_other_locked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 43_603 nanoseconds. - Weight::from_ref_time(42_696_097 as u64) - // Standard Error: 996 - .saturating_add(Weight::from_ref_time(65_316 as u64).saturating_mul(l as u64)) - // Standard Error: 1_772 - .saturating_add(Weight::from_ref_time(65_862 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (33_522_000 as Weight) + // Standard Error: 1_000 + .saturating_add((74_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 43_099 nanoseconds. - Weight::from_ref_time(42_937_914 as u64) - // Standard Error: 884 - .saturating_add(Weight::from_ref_time(52_079 as u64).saturating_mul(l as u64)) - // Standard Error: 1_573 - .saturating_add(Weight::from_ref_time(36_274 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (32_558_000 as Weight) + // Standard Error: 1_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. 
- /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 59_023 nanoseconds. - Weight::from_ref_time(59_606_862 as u64) - // Standard Error: 2_078 - .saturating_add(Weight::from_ref_time(55_335 as u64).saturating_mul(l as u64)) - // Standard Error: 3_698 - .saturating_add(Weight::from_ref_time(26_743 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (49_260_000 as Weight) + // Standard Error: 1_000 + .saturating_add((80_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 58_249 nanoseconds. 
- Weight::from_ref_time(59_025_976 as u64) - // Standard Error: 2_078 - .saturating_add(Weight::from_ref_time(55_736 as u64).saturating_mul(l as u64)) - // Standard Error: 3_697 - .saturating_add(Weight::from_ref_time(24_903 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + (49_166_000 as Weight) + // Standard Error: 2_000 + .saturating_add((77_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 4_000 + .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 45_279 nanoseconds. - Weight::from_ref_time(44_197_440 as u64) - // Standard Error: 946 - .saturating_add(Weight::from_ref_time(62_308 as u64).saturating_mul(l as u64)) - // Standard Error: 1_747 - .saturating_add(Weight::from_ref_time(64_473 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (34_042_000 as Weight) + // Standard Error: 1_000 + .saturating_add((83_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((80_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - /// The range of component `l` is `[0, 49]`. - /// The range of component `s` is `[2, 28]`. 
fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - // Minimum execution time: 44_925 nanoseconds. - Weight::from_ref_time(44_219_676 as u64) - // Standard Error: 889 - .saturating_add(Weight::from_ref_time(60_311 as u64).saturating_mul(l as u64)) - // Standard Error: 1_641 - .saturating_add(Weight::from_ref_time(63_095 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (33_937_000 as Weight) + // Standard Error: 1_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((73_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } } diff --git a/frame/whitelist/Cargo.toml b/frame/whitelist/Cargo.toml index 895a6e753816d..daee560904a08 100644 --- a/frame/whitelist/Cargo.toml +++ b/frame/whitelist/Cargo.toml @@ -31,7 +31,6 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ - "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/whitelist/src/benchmarking.rs b/frame/whitelist/src/benchmarking.rs index e0a758b2ddfdf..cafd1668819dd 100644 --- a/frame/whitelist/src/benchmarking.rs +++ b/frame/whitelist/src/benchmarking.rs @@ -33,7 +33,7 @@ benchmarks! { whitelist_call { let origin = T::WhitelistOrigin::successful_origin(); let call_hash = Default::default(); - }: _(origin, call_hash) + }: _(origin, call_hash) verify { ensure!( WhitelistedCall::::contains_key(call_hash), @@ -50,7 +50,7 @@ benchmarks! { let call_hash = Default::default(); Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); - }: _(origin, call_hash) + }: _(origin, call_hash) verify { ensure!( !WhitelistedCall::::contains_key(call_hash), @@ -71,7 +71,7 @@ benchmarks! 
{ let remark_len = >::MaxSize::get() - 10; let remark = sp_std::vec![1u8; remark_len as usize]; - let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); + let call: ::Call = frame_system::Call::remark { remark }.into(); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_hash = T::Hashing::hash(&encoded_call[..]); @@ -82,7 +82,7 @@ benchmarks! { let encoded_call = encoded_call.try_into().expect("encoded_call must be small enough"); T::PreimageProvider::note_preimage(encoded_call); - }: _(origin, call_hash, call_weight) + }: _(origin, call_hash, call_weight) verify { ensure!( !WhitelistedCall::::contains_key(call_hash), @@ -100,12 +100,12 @@ benchmarks! { let origin = T::DispatchWhitelistedOrigin::successful_origin(); let remark = sp_std::vec![1u8; n as usize]; - let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); + let call: ::Call = frame_system::Call::remark { remark }.into(); let call_hash = T::Hashing::hash_of(&call); Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); - }: _(origin, Box::new(call)) + }: _(origin, Box::new(call)) verify { ensure!( !WhitelistedCall::::contains_key(call_hash), diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index be5fdf9e472b3..239f0fd280160 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -26,8 +26,8 @@ //! and allow another configurable origin: [`Config::DispatchWhitelistedOrigin`] to dispatch them //! with the root origin. //! -//! In the meantime the call corresponding to the hash must have been submitted to the pre-image -//! handler [`PreimageProvider`]. +//! In the meantime the call corresponding to the hash must have been submitted to the to the +//! pre-image handler [`PreimageProvider`]. 
#![cfg_attr(not(feature = "std"), no_std)] @@ -38,14 +38,12 @@ mod mock; #[cfg(test)] mod tests; pub mod weights; -pub use weights::WeightInfo; use codec::{DecodeLimit, Encode, FullCodec}; use frame_support::{ - dispatch::{GetDispatchInfo, PostDispatchInfo}, ensure, traits::{PreimageProvider, PreimageRecipient}, - weights::Weight, + weights::{GetDispatchInfo, PostDispatchInfo, Weight}, }; use scale_info::TypeInfo; use sp_runtime::traits::{Dispatchable, Hash}; @@ -56,17 +54,18 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; + use crate::weights::WeightInfo; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type Event: From> + IsType<::Event>; /// The overarching call type. - type RuntimeCall: IsType<::RuntimeCall> - + Dispatchable + type Call: IsType<::Call> + + Dispatchable + GetDispatchInfo + FullCodec + TypeInfo @@ -74,10 +73,10 @@ pub mod pallet { + Parameter; /// Required origin for whitelisting a call. - type WhitelistOrigin: EnsureOrigin; + type WhitelistOrigin: EnsureOrigin; /// Required origin for dispatching whitelisted call with root origin. - type DispatchWhitelistedOrigin: EnsureOrigin; + type DispatchWhitelistedOrigin: EnsureOrigin; /// The handler of pre-images. // NOTE: recipient is only needed for benchmarks. 
@@ -166,14 +165,14 @@ pub mod pallet { let call = T::PreimageProvider::get_preimage(&call_hash) .ok_or(Error::::UnavailablePreImage)?; - let call = ::RuntimeCall::decode_all_with_depth_limit( + let call = ::Call::decode_all_with_depth_limit( sp_api::MAX_EXTRINSIC_DEPTH, &mut &call[..], ) .map_err(|_| Error::::UndecodableCall)?; ensure!( - call.get_dispatch_info().weight.all_lte(call_weight_witness), + call.get_dispatch_info().weight <= call_weight_witness, Error::::InvalidCallWeightWitness ); @@ -192,7 +191,7 @@ pub mod pallet { })] pub fn dispatch_whitelisted_call_with_preimage( origin: OriginFor, - call: Box<::RuntimeCall>, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { T::DispatchWhitelistedOrigin::ensure_origin(origin)?; @@ -217,7 +216,7 @@ impl Pallet { /// Clean whitelisting/preimage and dispatch call. /// /// Return the call actual weight of the dispatched call if there is some. - fn clean_and_dispatch(call_hash: T::Hash, call: ::RuntimeCall) -> Option { + fn clean_and_dispatch(call_hash: T::Hash, call: ::Call) -> Option { WhitelistedCall::::remove(call_hash); T::PreimageProvider::unrequest_preimage(&call_hash); diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index d4446cb8031ab..634db53a09a4e 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -51,23 +51,23 @@ construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max(1024); } impl frame_system::Config for Test { type BaseCallFilter = Nothing; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -85,7 +85,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -93,17 +93,18 @@ impl pallet_balances::Config for Test { } impl pallet_preimage::Config for Test { - type RuntimeEvent = RuntimeEvent; + type Event = Event; type Currency = Balances; type ManagerOrigin = EnsureRoot; + type MaxSize = ConstU32<{ 4096 * 1024 }>; // PreimageMaxSize Taken from Polkadot as reference. 
type BaseDeposit = ConstU64<1>; type ByteDeposit = ConstU64<1>; type WeightInfo = (); } impl pallet_whitelist::Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; + type Event = Event; + type Call = Call; type WhitelistOrigin = EnsureRoot; type DispatchWhitelistedOrigin = EnsureRoot; type PreimageProvider = Preimage; diff --git a/frame/whitelist/src/tests.rs b/frame/whitelist/src/tests.rs index fd6558e83f30e..67bccaeaeebe1 100644 --- a/frame/whitelist/src/tests.rs +++ b/frame/whitelist/src/tests.rs @@ -19,48 +19,46 @@ use crate::mock::*; use codec::Encode; -use frame_support::{ - assert_noop, assert_ok, dispatch::GetDispatchInfo, traits::PreimageProvider, weights::Weight, -}; +use frame_support::{assert_noop, assert_ok, dispatch::GetDispatchInfo, traits::PreimageProvider}; use sp_runtime::{traits::Hash, DispatchError}; #[test] fn test_whitelist_call_and_remove() { new_test_ext().execute_with(|| { - let call = RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let call = Call::System(frame_system::Call::remark { remark: vec![] }); let encoded_call = call.encode(); let call_hash = ::Hashing::hash(&encoded_call[..]); assert_noop!( - Whitelist::remove_whitelisted_call(RuntimeOrigin::root(), call_hash), + Whitelist::remove_whitelisted_call(Origin::root(), call_hash), crate::Error::::CallIsNotWhitelisted, ); assert_noop!( - Whitelist::whitelist_call(RuntimeOrigin::signed(1), call_hash), + Whitelist::whitelist_call(Origin::signed(1), call_hash), DispatchError::BadOrigin, ); - assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); + assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); assert!(Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash), + Whitelist::whitelist_call(Origin::root(), call_hash), crate::Error::::CallAlreadyWhitelisted, ); assert_noop!( - Whitelist::remove_whitelisted_call(RuntimeOrigin::signed(1), 
call_hash), + Whitelist::remove_whitelisted_call(Origin::signed(1), call_hash), DispatchError::BadOrigin, ); - assert_ok!(Whitelist::remove_whitelisted_call(RuntimeOrigin::root(), call_hash)); + assert_ok!(Whitelist::remove_whitelisted_call(Origin::root(), call_hash)); assert!(!Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::remove_whitelisted_call(RuntimeOrigin::root(), call_hash), + Whitelist::remove_whitelisted_call(Origin::root(), call_hash), crate::Error::::CallIsNotWhitelisted, ); }); @@ -69,51 +67,43 @@ fn test_whitelist_call_and_remove() { #[test] fn test_whitelist_call_and_execute() { new_test_ext().execute_with(|| { - let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); + let call = Call::System(frame_system::Call::remark_with_event { remark: vec![1] }); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_hash = ::Hashing::hash(&encoded_call[..]); assert_noop!( - Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), crate::Error::::CallIsNotWhitelisted, ); - assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); + assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call(RuntimeOrigin::signed(1), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(Origin::signed(1), call_hash, call_weight), DispatchError::BadOrigin, ); assert_noop!( - Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), crate::Error::::UnavailablePreImage, ); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::root(), encoded_call)); + assert_ok!(Preimage::note_preimage(Origin::root(), encoded_call)); assert!(Preimage::preimage_requested(&call_hash)); assert_noop!( - 
Whitelist::dispatch_whitelisted_call( - RuntimeOrigin::root(), - call_hash, - call_weight - Weight::from_ref_time(1) - ), + Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight - 1), crate::Error::::InvalidCallWeightWitness, ); - assert_ok!(Whitelist::dispatch_whitelisted_call( - RuntimeOrigin::root(), - call_hash, - call_weight - )); + assert_ok!(Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight)); assert!(!Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), crate::Error::::CallIsNotWhitelisted, ); }); @@ -122,22 +112,18 @@ fn test_whitelist_call_and_execute() { #[test] fn test_whitelist_call_and_execute_failing_call() { new_test_ext().execute_with(|| { - let call = RuntimeCall::Whitelist(crate::Call::dispatch_whitelisted_call { + let call = Call::Whitelist(crate::Call::dispatch_whitelisted_call { call_hash: Default::default(), - call_weight_witness: Weight::zero(), + call_weight_witness: 0, }); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_hash = ::Hashing::hash(&encoded_call[..]); - assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::root(), encoded_call)); + assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); + assert_ok!(Preimage::note_preimage(Origin::root(), encoded_call)); assert!(Preimage::preimage_requested(&call_hash)); - assert_ok!(Whitelist::dispatch_whitelisted_call( - RuntimeOrigin::root(), - call_hash, - call_weight - )); + assert_ok!(Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight)); assert!(!Preimage::preimage_requested(&call_hash)); }); } @@ -145,23 +131,22 @@ fn test_whitelist_call_and_execute_failing_call() { #[test] fn 
test_whitelist_call_and_execute_without_note_preimage() { new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::System(frame_system::Call::remark_with_event { - remark: vec![1], - })); + let call = + Box::new(Call::System(frame_system::Call::remark_with_event { remark: vec![1] })); let call_hash = ::Hashing::hash_of(&call); - assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); + assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); assert!(Preimage::preimage_requested(&call_hash)); assert_ok!(Whitelist::dispatch_whitelisted_call_with_preimage( - RuntimeOrigin::root(), + Origin::root(), call.clone() )); assert!(!Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call_with_preimage(RuntimeOrigin::root(), call), + Whitelist::dispatch_whitelisted_call_with_preimage(Origin::root(), call), crate::Error::::CallIsNotWhitelisted, ); }); @@ -170,7 +155,7 @@ fn test_whitelist_call_and_execute_without_note_preimage() { #[test] fn test_whitelist_call_and_execute_decode_consumes_all() { new_test_ext().execute_with(|| { - let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); + let call = Call::System(frame_system::Call::remark_with_event { remark: vec![1] }); let call_weight = call.get_dispatch_info().weight; let mut call = call.encode(); // Appending something does not make the encoded call invalid. 
@@ -179,11 +164,11 @@ fn test_whitelist_call_and_execute_decode_consumes_all() { let call_hash = ::Hashing::hash(&call[..]); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::root(), call)); - assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); + assert_ok!(Preimage::note_preimage(Origin::root(), call)); + assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), crate::Error::::UndecodableCall, ); }); diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index 22d238d0fdd0f..81482c35e3de8 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,7 @@ //! Autogenerated weights for pallet_whitelist //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -33,10 +32,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./frame/whitelist/src/weights.rs -// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs +// --output=./frame/whitelist/src/weights.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,38 +56,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - // Minimum execution time: 26_352 nanoseconds. - Weight::from_ref_time(26_727_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (20_938_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - // Minimum execution time: 25_536 nanoseconds. - Weight::from_ref_time(25_969_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (22_332_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - // Minimum execution time: 4_802_466 nanoseconds. 
- Weight::from_ref_time(4_820_197_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + (5_989_917_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - /// The range of component `n` is `[1, 10000]`. + // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - // Minimum execution time: 29_184 nanoseconds. - Weight::from_ref_time(30_530_970 as u64) - // Standard Error: 7 - .saturating_add(Weight::from_ref_time(1_496 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + (25_325_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } } @@ -99,37 +93,34 @@ impl WeightInfo for () { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - // Minimum execution time: 26_352 nanoseconds. - Weight::from_ref_time(26_727_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (20_938_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - // Minimum execution time: 25_536 nanoseconds. 
- Weight::from_ref_time(25_969_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (22_332_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) + // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - // Minimum execution time: 4_802_466 nanoseconds. - Weight::from_ref_time(4_820_197_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + (5_989_917_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - /// The range of component `n` is `[1, 10000]`. + // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - // Minimum execution time: 29_184 nanoseconds. 
- Weight::from_ref_time(30_530_970 as u64) - // Standard Error: 7 - .saturating_add(Weight::from_ref_time(1_496 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + (25_325_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index a322799048a31..f3d091266d5d4 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -19,8 +19,7 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-version = { version = "5.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "0.12.0", default-features = false, optional = true, path = "../state-machine" } -sp-trie = { version = "6.0.0", default-features = false, optional = true, path = "../trie" } +sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machine" } hash-db = { version = "0.15.2", optional = true } thiserror = { version = "1.0.30", optional = true } @@ -36,8 +35,7 @@ std = [ "sp-core/std", "sp-std/std", "sp-runtime/std", - "sp-state-machine/std", - "sp-trie/std", + "sp-state-machine", "sp-version/std", "hash-db", "thiserror", diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 8acc15d6a0591..0a57be8d7a300 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ proc-macro = true quote = "1.0.10" syn = { version = "1.0.98", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.37" -blake2 = { version = "0.10.4", 
default-features = false } +blake2 = { version = "0.10.2", default-features = false } proc-macro-crate = "1.1.3" # Required for the doc tests diff --git a/primitives/api/proc-macro/src/common.rs b/primitives/api/proc-macro/src/common.rs deleted file mode 100644 index 10887be613278..0000000000000 --- a/primitives/api/proc-macro/src/common.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2024 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/// The ident used for the block generic parameter. -pub const BLOCK_GENERIC_IDENT: &str = "Block"; - -/// Unique identifier used to make the hidden includes unique for this macro. -pub const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; - -/// The `core_trait` attribute. -pub const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; -/// The `api_version` attribute. -/// -/// Is used to set the current version of the trait. -pub const API_VERSION_ATTRIBUTE: &str = "api_version"; -/// The `changed_in` attribute. -/// -/// Is used when the function signature changed between different versions of a trait. -/// This attribute should be placed on the old signature of the function. -pub const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; -/// The `renamed` attribute. -/// -/// Is used when a trait method was renamed. 
-pub const RENAMED_ATTRIBUTE: &str = "renamed"; -/// All attributes that we support in the declaration of a runtime api trait. -pub const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = - &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 8d46047dbda5a..b031c0f8bb1cc 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -16,15 +16,11 @@ // limitations under the License. use crate::utils::{ - extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, - generate_hidden_includes, generate_runtime_mod_name_for_trait, parse_runtime_api_version, - prefix_function_with_trait, replace_wild_card_parameter_names, return_type_extract_type, - versioned_trait_name, AllowSelfRefInParameters, -}; - -use crate::common::{ - API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, - HIDDEN_INCLUDES_ID, RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, + generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, + generate_runtime_mod_name_for_trait, prefix_function_with_trait, + replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, }; use proc_macro2::{Span, TokenStream}; @@ -37,11 +33,36 @@ use syn::{ parse_macro_input, parse_quote, spanned::Spanned, visit::{self, Visit}, - Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, TraitBound, - TraitItem, TraitItemMethod, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, + TraitBound, TraitItem, TraitItemMethod, Type, }; -use std::collections::{BTreeMap, HashMap}; +use 
std::collections::HashMap; + +/// The ident used for the block generic parameter. +const BLOCK_GENERIC_IDENT: &str = "Block"; + +/// Unique identifier used to make the hidden includes unique for this macro. +const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; + +/// The `core_trait` attribute. +const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; +/// The `api_version` attribute. +/// +/// Is used to set the current version of the trait. +const API_VERSION_ATTRIBUTE: &str = "api_version"; +/// The `changed_in` attribute. +/// +/// Is used when the function signature changed between different versions of a trait. +/// This attribute should be placed on the old signature of the function. +const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; +/// The `renamed` attribute. +/// +/// Is used when a trait method was renamed. +const RENAMED_ATTRIBUTE: &str = "renamed"; +/// All attributes that we support in the declaration of a runtime api trait. +const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = + &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { @@ -98,6 +119,20 @@ impl<'ast> Visit<'ast> for IsUsingBlock { } } +/// Visits the ast and checks if `Block` ident is used somewhere. +fn type_is_using_block(ty: &Type) -> bool { + let mut visitor = IsUsingBlock { result: false }; + visitor.visit_type(ty); + visitor.result +} + +/// Visits the ast and checks if `Block` ident is used somewhere. +fn return_type_is_using_block(ty: &ReturnType) -> bool { + let mut visitor = IsUsingBlock { result: false }; + visitor.visit_return_type(ty); + visitor.result +} + /// Replace all occurrences of `Block` with `NodeBlock` struct ReplaceBlockWithNodeBlock {} @@ -111,69 +146,146 @@ impl Fold for ReplaceBlockWithNodeBlock { } } -/// Versioned API traits are used to catch missing methods when implementing a specific version of a -/// versioned API. 
They contain all non-versioned methods (aka stable methods) from the main trait -/// and all versioned methods for the specific version. This means that there is one trait for each -/// version mentioned in the trait definition. For example: -/// ```ignore -/// // The trait version implicitly is 1 -/// decl_runtime_apis!( -/// trait SomeApi { -/// fn method1(); // this is a 'stable method' -/// -/// #[api_version(2)] -/// fn method2(); -/// -/// #[api_version(2)] -/// fn method3(); -/// -/// #[api_version(3)] -/// fn method4(); -/// } -/// ); -/// ``` -/// This trait has got three different versions. The function below will generate the following -/// code: -/// ``` -/// trait SomeApiV1 { -/// // in V1 only the stable methods are required. The rest has got default implementations. -/// fn method1(); -/// } -/// -/// trait SomeApiV2 { -/// // V2 contains all methods from V1 and V2. V3 not required so they are skipped. -/// fn method1(); -/// fn method2(); -/// fn method3(); -/// } -/// -/// trait SomeApiV3 { -/// // And V3 contains all methods from the trait. -/// fn method1(); -/// fn method2(); -/// fn method3(); -/// fn method4(); -/// } -/// ``` -fn generate_versioned_api_traits( - api: ItemTrait, - methods: BTreeMap>, -) -> Vec { - let mut result = Vec::::new(); - for (version, _) in &methods { - let mut versioned_trait = api.clone(); - versioned_trait.ident = versioned_trait_name(&versioned_trait.ident, *version); - versioned_trait.items = Vec::new(); - // Add the methods from the current version and all previous one. Versions are sorted so - // it's safe to stop early. 
- for (_, m) in methods.iter().take_while(|(v, _)| v <= &version) { - versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Method(m))); +/// Replace all occurrences of `Block` with `NodeBlock` +fn fn_arg_replace_block_with_node_block(fn_arg: FnArg) -> FnArg { + let mut replace = ReplaceBlockWithNodeBlock {}; + fold::fold_fn_arg(&mut replace, fn_arg) +} + +/// Replace all occurrences of `Block` with `NodeBlock` +fn return_type_replace_block_with_node_block(return_type: ReturnType) -> ReturnType { + let mut replace = ReplaceBlockWithNodeBlock {}; + fold::fold_return_type(&mut replace, return_type) +} + +/// Generate the functions that generate the native call closure for each trait method. +fn generate_native_call_generators(decl: &ItemTrait) -> Result { + let fns = decl.items.iter().filter_map(|i| match i { + TraitItem::Method(ref m) => Some(&m.sig), + _ => None, + }); + + let mut result = Vec::new(); + let trait_ = &decl.ident; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Auxiliary function that is used to convert between types that use different block types. + // The function expects that both are convertible by encoding the one and decoding the other. + result.push(quote!( + #[cfg(any(feature = "std", test))] + fn convert_between_block_types + #crate_::ApiError>( + input: &I, + map_error: F, + ) -> std::result::Result + { + ::decode_with_depth_limit( + #crate_::MAX_EXTRINSIC_DEPTH, + &mut &#crate_::Encode::encode(input)[..], + ).map_err(map_error) } + )); + + // Generate a native call generator for each function of the given trait. 
+ for fn_ in fns { + let params = extract_parameter_names_types_and_borrows(fn_, AllowSelfRefInParameters::No)?; + let trait_fn_name = &fn_.ident; + let function_name_str = fn_.ident.to_string(); + let fn_name = generate_native_call_generator_fn_name(&fn_.ident); + let output = return_type_replace_block_with_node_block(fn_.output.clone()); + let output_ty = return_type_extract_type(&output); + let output = quote!( std::result::Result<#output_ty, #crate_::ApiError> ); + + // Every type that is using the `Block` generic parameter, we need to encode/decode, + // to make it compatible between the runtime/node. + let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { + let param_name = quote!(#n).to_string(); + + quote!( + let #n: #t = convert_between_block_types( + &#n, + |e| #crate_::ApiError::FailedToConvertParameter { + function: #function_name_str, + parameter: #param_name, + error: e, + }, + )?; + ) + }); + // Same as for the input types, we need to check if we also need to convert the output, + // before returning it. + let output_conversion = if return_type_is_using_block(&fn_.output) { + quote!( + convert_between_block_types( + &res, + |e| #crate_::ApiError::FailedToConvertReturnValue { + function: #function_name_str, + error: e, + }, + ) + ) + } else { + quote!(Ok(res)) + }; + + let input_names = params.iter().map(|v| &v.0); + // If the type is using the block generic type, we will encode/decode it to make it + // compatible. To ensure that we forward it by ref/value, we use the value given by the + // the user. Otherwise if it is not using the block, we don't need to add anything. + let input_borrows = + params.iter().map(|v| if type_is_using_block(&v.1) { v.2 } else { None }); + + // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect + // all the function inputs. 
+ let fn_inputs = fn_ + .inputs + .iter() + .map(|v| fn_arg_replace_block_with_node_block(v.clone())) + .map(|v| match v { + FnArg::Typed(ref arg) => { + let mut arg = arg.clone(); + if let Type::Reference(ref mut r) = *arg.ty { + r.lifetime = Some(parse_quote!( 'a )); + } + FnArg::Typed(arg) + }, + r => r, + }); + + let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); + // We need to parse them again, to get an easy access to the actual parameters. + let impl_generics: Generics = parse_quote!( #impl_generics ); + let impl_generics_params = impl_generics.params.iter().map(|p| { + match p { + GenericParam::Type(ref ty) => { + let mut ty = ty.clone(); + ty.bounds.push(parse_quote!( 'a )); + GenericParam::Type(ty) + }, + // We should not see anything different than type params here. + r => r.clone(), + } + }); - result.push(versioned_trait); + // Generate the generator function + result.push(quote!( + #[cfg(any(feature = "std", test))] + pub fn #fn_name< + 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::BlockT + #(, #impl_generics_params)* + >( + #( #fn_inputs ),* + ) -> impl FnOnce() -> #output + 'a #where_clause { + move || { + #( #conversions )* + let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); + #output_conversion + } + } + )); } - result + Ok(quote!( #( #result )* )) } /// Try to parse the given `Attribute` as `renamed` attribute. @@ -211,13 +323,126 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { } } +/// Generate the functions that call the api at a given block for a given trait method. +fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { + let fns = decl.items.iter().filter_map(|i| match i { + TraitItem::Method(ref m) => Some((&m.attrs, &m.sig)), + _ => None, + }); + + let mut result = Vec::new(); + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Generate a native call generator for each function of the given trait. 
+ for (attrs, fn_) in fns { + let trait_name = &decl.ident; + let trait_fn_name = prefix_function_with_trait(trait_name, &fn_.ident); + let fn_name = generate_call_api_at_fn_name(&fn_.ident); + + let attrs = remove_supported_attributes(&mut attrs.clone()); + + if attrs.contains_key(RENAMED_ATTRIBUTE) && attrs.contains_key(CHANGED_IN_ATTRIBUTE) { + return Err(Error::new( + fn_.span(), + format!( + "`{}` and `{}` are not supported at once.", + RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE + ), + )) + } + + // We do not need to generate this function for a method that signature was changed. + if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { + continue + } + + // Parse the renamed attributes. + let mut renames = Vec::new(); + if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { + let (old_name, version) = parse_renamed_attribute(a)?; + renames.push((version, prefix_function_with_trait(trait_name, &old_name))); + } + + renames.sort_by(|l, r| r.cmp(l)); + let (versions, old_names) = renames.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut versions, mut old_names), (version, old_name)| { + versions.push(version); + old_names.push(old_name); + (versions, old_names) + }, + ); + + // Generate the generator function + result.push(quote!( + #[cfg(any(feature = "std", test))] + #[allow(clippy::too_many_arguments)] + pub fn #fn_name< + R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, + NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, + Block: #crate_::BlockT, + T: #crate_::CallApiAt, + >( + call_runtime_at: &T, + at: &#crate_::BlockId, + args: std::vec::Vec, + changes: &std::cell::RefCell<#crate_::OverlayedChanges>, + storage_transaction_cache: &std::cell::RefCell< + #crate_::StorageTransactionCache + >, + native_call: std::option::Option, + context: #crate_::ExecutionContext, + recorder: &std::option::Option<#crate_::ProofRecorder>, + ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { + let version = 
call_runtime_at.runtime_version_at(at)?; + + #( + // Check if we need to call the function by an old name. + if version.apis.iter().any(|(s, v)| { + s == &ID && *v < #versions + }) { + let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { + at, + function: #old_names, + native_call: None, + arguments: args, + overlayed_changes: changes, + storage_transaction_cache, + context, + recorder, + }; + + let ret = #crate_::CallApiAt::::call_api_at(call_runtime_at, params)?; + + return Ok(ret) + } + )* + + let params = #crate_::CallApiAtParams { + at, + function: #trait_fn_name, + native_call, + arguments: args, + overlayed_changes: changes, + storage_transaction_cache, + context, + recorder, + }; + + #crate_::CallApiAt::::call_api_at(call_runtime_at, params) + } + )); + } + + Ok(quote!( #( #result )* )) +} + /// Generate the declaration of the trait for the runtime. fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let mut result = Vec::new(); for decl in decls { let mut decl = decl.clone(); - let decl_span = decl.span(); extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); @@ -225,73 +450,30 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); - let trait_api_version = get_api_version(&found_attributes)?; - - let mut methods_by_version: BTreeMap> = BTreeMap::new(); - - // Process the items in the declaration. 
The filter_map function below does a lot of stuff - // because the method attributes are stripped at this point - decl.items.iter_mut().for_each(|i| match i { - TraitItem::Method(ref mut method) => { - let method_attrs = remove_supported_attributes(&mut method.attrs); - let mut method_version = trait_api_version; - // validate the api version for the method (if any) and generate default - // implementation for versioned methods - if let Some(version_attribute) = method_attrs.get(API_VERSION_ATTRIBUTE) { - method_version = match parse_runtime_api_version(version_attribute) { - Ok(method_api_ver) if method_api_ver < trait_api_version => { - let method_ver = method_api_ver.to_string(); - let trait_ver = trait_api_version.to_string(); - let mut err1 = Error::new( - version_attribute.span(), - format!( - "Method version `{}` is older than (or equal to) trait version `{}`.\ - Methods can't define versions older than the trait version.", - method_ver, - trait_ver, - ), - ); - - let err2 = match found_attributes.get(&API_VERSION_ATTRIBUTE) { - Some(attr) => Error::new(attr.span(), "Trait version is set here."), - None => Error::new( - decl_span, - "Trait version is not set so it is implicitly equal to 1.", - ), - }; - err1.combine(err2); - result.push(err1.to_compile_error()); - - trait_api_version - }, - Ok(method_api_ver) => method_api_ver, - Err(e) => { - result.push(e.to_compile_error()); - trait_api_version - }, - }; - } - - // Any method with the `changed_in` attribute isn't required for the runtime - // anymore. - if !method_attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - // Make sure we replace all the wild card parameter names. 
- replace_wild_card_parameter_names(&mut method.sig); - - // partition methods by api version - methods_by_version.entry(method_version).or_default().push(method.clone()); - } - }, - _ => (), - }); - - let versioned_api_traits = generate_versioned_api_traits(decl.clone(), methods_by_version); + let call_api_at_calls = generate_call_api_at_calls(&decl)?; + + // Remove methods that have the `changed_in` attribute as they are not required for the + // runtime anymore. + decl.items = decl + .items + .iter_mut() + .filter_map(|i| match i { + TraitItem::Method(ref mut method) => { + if remove_supported_attributes(&mut method.attrs) + .contains_key(CHANGED_IN_ATTRIBUTE) + { + None + } else { + // Make sure we replace all the wild card parameter names. + replace_wild_card_parameter_names(&mut method.sig); + Some(TraitItem::Method(method.clone())) + } + }, + r => Some(r.clone()), + }) + .collect(); - let main_api_ident = decl.ident.clone(); - let versioned_ident = &versioned_api_traits - .first() - .expect("There should always be at least one version.") - .ident; + let native_call_generators = generate_native_call_generators(&decl)?; result.push(quote!( #[doc(hidden)] @@ -300,13 +482,15 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { pub mod #mod_name { use super::*; - #( #versioned_api_traits )* - - pub use #versioned_ident as #main_api_ident; + #decl pub #api_version pub #id + + #native_call_generators + + #call_api_at_calls } )); } @@ -325,45 +509,18 @@ struct ToClientSideDecl<'a> { } impl<'a> ToClientSideDecl<'a> { - /// Process the given [`ItemTrait`]. - fn process(mut self, decl: ItemTrait) -> ItemTrait { - let mut decl = self.fold_item_trait(decl); - - let block_id = self.block_id; - let crate_ = self.crate_; - - // Add the special method that will be implemented by the `impl_runtime_apis!` macro - // to enable functions to call into the runtime. - decl.items.push(parse_quote! { - /// !!INTERNAL USE ONLY!! 
- #[doc(hidden)] - fn __runtime_api_internal_call_api_at( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: std::vec::Vec, - fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, - ) -> std::result::Result, #crate_::ApiError>; - }); - - decl - } -} - -impl<'a> ToClientSideDecl<'a> { - fn fold_item_trait_items( - &mut self, - items: Vec, - trait_generics_num: usize, - ) -> Vec { + fn fold_item_trait_items(&mut self, items: Vec) -> Vec { let mut result = Vec::new(); items.into_iter().for_each(|i| match i { TraitItem::Method(method) => { - let (fn_decl, fn_decl_ctx) = - self.fold_trait_item_method(method, trait_generics_num); + let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); result.push(fn_decl.into()); result.push(fn_decl_ctx.into()); + + if let Some(fn_impl) = fn_impl { + result.push(fn_impl.into()); + } }, r => result.push(r), }); @@ -374,24 +531,20 @@ impl<'a> ToClientSideDecl<'a> { fn fold_trait_item_method( &mut self, method: TraitItemMethod, - trait_generics_num: usize, - ) -> (TraitItemMethod, TraitItemMethod) { + ) -> (TraitItemMethod, Option, TraitItemMethod) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); - let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num); - let fn_decl_ctx = self.create_method_decl_with_context(method, trait_generics_num); + let fn_impl = self.create_method_runtime_api_impl(method.clone()); + let fn_decl = self.create_method_decl(method.clone(), context); + let fn_decl_ctx = self.create_method_decl_with_context(method); - (fn_decl, fn_decl_ctx) + (fn_decl, fn_impl, fn_decl_ctx) } - fn create_method_decl_with_context( - &mut self, - method: TraitItemMethod, - trait_generics_num: usize, - ) -> TraitItemMethod { + fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: 
#crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); fn_decl_ctx.sig.inputs.insert(2, context_arg); @@ -399,6 +552,52 @@ impl<'a> ToClientSideDecl<'a> { fn_decl_ctx } + /// Takes the given method and creates a `method_runtime_api_impl` method that will be + /// implemented in the runtime for the client side. + fn create_method_runtime_api_impl( + &mut self, + mut method: TraitItemMethod, + ) -> Option { + if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { + return None + } + + let fn_sig = &method.sig; + let ret_type = return_type_extract_type(&fn_sig.output); + + // Get types and if the value is borrowed from all parameters. + // If there is an error, we push it as the block to the user. + let param_types = + match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { + Ok(res) => res + .into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + }, + }; + let name = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); + let block_id = self.block_id; + let crate_ = self.crate_; + + Some(parse_quote! { + #[doc(hidden)] + fn #name( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + params_encoded: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; + }) + } + /// Takes the method declared by the user and creates the declaration we require for the runtime /// api client side. This method will call by default the `method_runtime_api_impl` for doing /// the actual call into the runtime. 
@@ -406,7 +605,6 @@ impl<'a> ToClientSideDecl<'a> { &mut self, mut method: TraitItemMethod, context: TokenStream, - trait_generics_num: usize, ) -> TraitItemMethod { let params = match extract_parameter_names_types_and_borrows( &method.sig, @@ -418,42 +616,18 @@ impl<'a> ToClientSideDecl<'a> { Vec::new() }, }; + let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); fold_fn_decl_for_client_side(&mut method.sig, self.block_id, self.crate_); + let name_impl = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); let crate_ = self.crate_; let found_attributes = remove_supported_attributes(&mut method.attrs); - - // Parse the renamed attributes. - let mut renames = Vec::new(); - for (_, a) in found_attributes.iter().filter(|a| a.0 == &RENAMED_ATTRIBUTE) { - match parse_renamed_attribute(a) { - Ok((old_name, version)) => { - renames.push((version, prefix_function_with_trait(&self.trait_, &old_name))); - }, - Err(e) => self.errors.push(e.to_compile_error()), - } - } - - renames.sort_by(|l, r| r.cmp(l)); - let (versions, old_names) = renames.into_iter().fold( - (Vec::new(), Vec::new()), - |(mut versions, mut old_names), (version, old_name)| { - versions.push(version); - old_names.push(old_name); - (versions, old_names) - }, - ); - - // Generate the function name before we may rename it below to - // `function_name_before_version_{}`. - let function_name = prefix_function_with_trait(&self.trait_, &method.sig.ident); - // If the method has a `changed_in` attribute, we need to alter the method name to // `method_before_version_VERSION`. - match get_changed_in(&found_attributes) { + let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { Ok(Some(version)) => { // Make sure that the `changed_in` version is at least the current `api_version`. 
if get_api_version(self.found_attributes).ok() < Some(version) { @@ -472,50 +646,46 @@ impl<'a> ToClientSideDecl<'a> { ); method.sig.ident = ident; method.attrs.push(parse_quote!( #[deprecated] )); + + let panic = + format!("Calling `{}` should not return a native value!", method.sig.ident); + (quote!(panic!(#panic)), quote!(None)) }, - Ok(None) => {}, + Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), Err(e) => { self.errors.push(e.to_compile_error()); + (quote!(unimplemented!()), quote!(None)) }, }; - // The module where the runtime relevant stuff is declared. - let trait_name = &self.trait_; - let runtime_mod = generate_runtime_mod_name_for_trait(trait_name); - let underscores = (0..trait_generics_num).map(|_| quote!(_)); + let function_name = method.sig.ident.to_string(); // Generate the default implementation that calls the `method_runtime_api_impl` method. method.default = Some(parse_quote! { { - let __runtime_api_impl_params_encoded__ = + let runtime_api_impl_params_encoded = #crate_::Encode::encode(&( #( &#params ),* )); - >::__runtime_api_internal_call_api_at( - self, + self.#name_impl( __runtime_api_at_param__, #context, - __runtime_api_impl_params_encoded__, - &|version| { - #( - // Check if we need to call the function by an old name. 
- if version.apis.iter().any(|(s, v)| { - s == &#runtime_mod::ID && *v < #versions - }) { - return #old_names - } - )* - - #function_name - } - ) - .and_then(|r| - std::result::Result::map_err( - <#ret_type as #crate_::Decode>::decode(&mut &r[..]), - |err| #crate_::ApiError::FailedToDecodeReturnValue { - function: #function_name, - error: err, + #param_tuple, + runtime_api_impl_params_encoded, + ).and_then(|r| + match r { + #crate_::NativeOrEncoded::Native(n) => { + #native_handling + }, + #crate_::NativeOrEncoded::Encoded(r) => { + std::result::Result::map_err( + <#ret_type as #crate_::Decode>::decode(&mut &r[..]), + |err| #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) } - ) + } ) } }); @@ -535,7 +705,7 @@ impl<'a> Fold for ToClientSideDecl<'a> { if is_core_trait { // Add all the supertraits we want to have for `Core`. - input.supertraits = parse_quote!('static + Send); + input.supertraits = parse_quote!('static + Send + Sync); } else { // Add the `Core` runtime api as super trait. let crate_ = &self.crate_; @@ -544,12 +714,37 @@ impl<'a> Fold for ToClientSideDecl<'a> { // The client side trait is only required when compiling with the feature `std` or `test`. input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - input.items = self.fold_item_trait_items(input.items, input.generics.params.len()); + input.items = self.fold_item_trait_items(input.items); fold::fold_item_trait(self, input) } } +/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. +fn parse_runtime_api_version(version: &Attribute) -> Result { + let meta = version.parse_meta()?; + + let err = Err(Error::new( + meta.span(), + &format!( + "Unexpected `{api_version}` attribute. 
The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + )); + + match meta { + Meta::List(list) => + if list.nested.len() != 1 { + err + } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { + i.base10_parse() + } else { + err + }, + _ => err, + } +} + /// Generates the identifier as const variable for the given `trait_name` /// by hashing the `trait_name`. fn generate_runtime_api_id(trait_name: &str) -> TokenStream { @@ -626,14 +821,16 @@ fn generate_client_side_decls(decls: &[ItemTrait]) -> Result { let mut errors = Vec::new(); let trait_ = decl.ident.clone(); - let decl = ToClientSideDecl { - crate_: &crate_, - block_id: &block_id, - found_attributes: &mut found_attributes, - errors: &mut errors, - trait_: &trait_, - } - .process(decl); + let decl = { + let mut to_client_side = ToClientSideDecl { + crate_: &crate_, + block_id: &block_id, + found_attributes: &mut found_attributes, + errors: &mut errors, + trait_: &trait_, + }; + to_client_side.fold_item_trait(decl) + }; let api_version = get_api_version(&found_attributes); diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index c3f4e36655d22..02ef37370ffeb 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -17,13 +17,13 @@ use crate::utils::{ extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, - generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, - versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, + generate_hidden_includes, generate_method_runtime_api_impl_name, + generate_native_call_generator_fn_name, 
generate_runtime_mod_name_for_trait, + prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; -use crate::common::API_VERSION_ATTRIBUTE; - use proc_macro2::{Span, TokenStream}; use quote::quote; @@ -33,7 +33,8 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, Ident, ImplItem, ItemImpl, Path, Signature, Type, TypePath, + Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, + TypePath, }; use std::collections::HashSet; @@ -104,10 +105,8 @@ fn generate_impl_calls( let mut impl_calls = Vec::new(); for impl_ in impls { - let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); - let impl_trait = extend_with_api_version(impl_trait, trait_api_ver); let impl_trait_ident = &impl_trait_path .segments .last() @@ -211,6 +210,19 @@ fn generate_runtime_api_base_structures() -> Result { recorder: std::option::Option<#crate_::ProofRecorder>, } + // `RuntimeApi` itself is not threadsafe. However, an instance is only available in a + // `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it + // impossible to move `RuntimeApi` into another thread. 
+ #[cfg(any(feature = "std", test))] + unsafe impl> Send + for RuntimeApiImpl + {} + + #[cfg(any(feature = "std", test))] + unsafe impl> Sync + for RuntimeApiImpl + {} + #[cfg(any(feature = "std", test))] impl> #crate_::ApiExt for RuntimeApiImpl @@ -264,13 +276,9 @@ fn generate_runtime_api_base_structures() -> Result { std::clone::Clone::clone(&self.recorder) } - fn extract_proof( - &mut self, - ) -> std::option::Option<#crate_::StorageProof> { - let recorder = std::option::Option::take(&mut self.recorder); - std::option::Option::map(recorder, |recorder| { - #crate_::ProofRecorder::::drain_storage_proof(recorder) - }) + fn extract_proof(&mut self) -> std::option::Option<#crate_::StorageProof> { + std::option::Option::take(&mut self.recorder) + .map(|recorder| #crate_::ProofRecorder::::to_storage_proof(&recorder)) } fn into_storage_changes( @@ -318,6 +326,35 @@ fn generate_runtime_api_base_structures() -> Result { #[cfg(any(feature = "std", test))] impl> RuntimeApiImpl { + fn call_api_at< + R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, + F: FnOnce( + &C, + &std::cell::RefCell<#crate_::OverlayedChanges>, + &std::cell::RefCell<#crate_::StorageTransactionCache>, + &std::option::Option<#crate_::ProofRecorder>, + ) -> std::result::Result<#crate_::NativeOrEncoded, E>, + E, + >( + &self, + call_api_at: F, + ) -> std::result::Result<#crate_::NativeOrEncoded, E> { + if *std::cell::RefCell::borrow(&self.commit_on_success) { + #crate_::OverlayedChanges::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); + } + let res = call_api_at( + &self.call, + &self.changes, + &self.storage_transaction_cache, + &self.recorder, + ); + + self.commit_or_rollback(std::result::Result::is_ok(&res)); + res + } + fn commit_or_rollback(&self, commit: bool) { let proof = "\ We only close a transaction when we opened one ourself. 
@@ -361,24 +398,6 @@ fn extend_with_runtime_decl_path(mut trait_: Path) -> Path { trait_ } -fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { - let version = if let Some(v) = version { - v - } else { - // nothing to do - return trait_ - }; - - let trait_name = &mut trait_ - .segments - .last_mut() - .expect("Trait path should always contain at least one item; qed") - .ident; - *trait_name = versioned_trait_name(trait_name, version); - - trait_ -} - /// Generates the implementations of the apis for the runtime. fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { let mut impls_prepared = Vec::new(); @@ -386,12 +405,9 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { // We put `runtime` before each trait to get the trait that is intended for the runtime and // we put the `RuntimeBlock` as first argument for the trait generics. for impl_ in impls.iter() { - let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; - let mut impl_ = impl_.clone(); let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); let trait_ = extend_with_runtime_decl_path(trait_); - let trait_ = extend_with_api_version(trait_, trait_api_ver); impl_.trait_.as_mut().unwrap().1 = trait_; impl_.attrs = filter_cfg_attrs(&impl_.attrs); @@ -408,72 +424,121 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { /// with code that calls into the runtime. struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { runtime_block: &'a TypePath, + runtime_mod_path: &'a Path, + runtime_type: &'a Type, + trait_generic_arguments: &'a [GenericArgument], + impl_trait: &'a Ident, } -impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { - /// Process the given item implementation. 
- fn process(mut self, input: ItemImpl) -> ItemImpl { - let mut input = self.fold_item_impl(input); +impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { + fn fold_type_path(&mut self, input: TypePath) -> TypePath { + let new_ty_path = + if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + fold::fold_type_path(self, new_ty_path) + } - // Delete all functions, because all of them are default implemented by - // `decl_runtime_apis!`. We only need to implement the `__runtime_api_internal_call_api_at` - // function. - input.items.clear(); - input.items.push(parse_quote! { - fn __runtime_api_internal_call_api_at( + fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { + let block = { + let runtime_mod_path = self.runtime_mod_path; + let runtime = self.runtime_type; + let native_call_generator_ident = + generate_native_call_generator_fn_name(&input.sig.ident); + let call_api_at_call = generate_call_api_at_fn_name(&input.sig.ident); + let trait_generic_arguments = self.trait_generic_arguments; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Generate the access to the native parameters + let param_tuple_access = if input.sig.inputs.len() == 1 { + vec![quote!(p)] + } else { + input + .sig + .inputs + .iter() + .enumerate() + .map(|(i, _)| { + let i = syn::Index::from(i); + quote!( p.#i ) + }) + .collect::>() + }; + + let (param_types, error) = match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::No, + ) { + Ok(res) => ( + res.into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + None, + ), + Err(e) => (Vec::new(), Some(e.to_compile_error())), + }; + + // Rewrite the input parameters. + input.sig.inputs = parse_quote! 
{ &self, at: &#crate_::BlockId<__SR_API_BLOCK__>, context: #crate_::ExecutionContext, - params: std::vec::Vec, - fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, - ) -> std::result::Result, #crate_::ApiError> { - if *std::cell::RefCell::borrow(&self.commit_on_success) { - #crate_::OverlayedChanges::start_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ); - } - - let res = (|| { - let version = #crate_::CallApiAt::<__SR_API_BLOCK__>::runtime_version_at( - self.call, - at, - )?; - - let params = #crate_::CallApiAtParams { - at, - function: (*fn_name)(version), - arguments: params, - overlayed_changes: &self.changes, - storage_transaction_cache: &self.storage_transaction_cache, - context, - recorder: &self.recorder, - }; + params: Option<( #( #param_types ),* )>, + params_encoded: Vec, + }; - #crate_::CallApiAt::<__SR_API_BLOCK__>::call_api_at( - self.call, - params, + input.sig.ident = + generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); + let ret_type = return_type_extract_type(&input.sig.output); + + // Generate the correct return type. + input.sig.output = parse_quote!( + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> + ); + + // Generate the new method implementation that calls into the runtime. + parse_quote!( + { + // Get the error to the user (if we have one). 
+ #error + + self.call_api_at( + | + call_runtime_at, + changes, + storage_transaction_cache, + recorder + | { + #runtime_mod_path #call_api_at_call( + call_runtime_at, + at, + params_encoded, + changes, + storage_transaction_cache, + params.map(|p| { + #runtime_mod_path #native_call_generator_ident :: + <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( + #( #param_tuple_access ),* + ) + }), + context, + recorder, + ) + } ) - })(); - - self.commit_or_rollback(std::result::Result::is_ok(&res)); - - res - } - }); + } + ) + }; + let mut input = fold::fold_impl_item_method(self, input); + // We need to set the block, after we modified the rest of the ast, otherwise we would + // modify our generated block as well. + input.block = block; input } -} - -impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { - fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = - if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; - - fold::fold_type_path(self, new_ty_path) - } fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { // All this `UnwindSafe` magic below here is required for this rust bug: @@ -502,8 +567,6 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> }); - where_clause.predicates.push(parse_quote! { &'static RuntimeApiImplCall: Send }); - // Require that all types used in the function signatures are unwind safe. extract_all_signature_types(&input.items).iter().for_each(|i| { where_clause.predicates.push(parse_quote! { @@ -531,55 +594,45 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result for impl_ in impls { let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; + let impl_trait = &impl_trait_path + .segments + .last() + .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
+ .clone(); let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; + let runtime_type = &impl_.self_ty; let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); // remove the trait to get just the module path runtime_mod_path.segments.pop(); - let processed_impl = - ApiRuntimeImplToApiRuntimeApiImpl { runtime_block }.process(impl_.clone()); + let trait_generic_arguments = match impl_trait.arguments { + PathArguments::Parenthesized(_) | PathArguments::None => vec![], + PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(), + }; + + let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { + runtime_block, + runtime_mod_path: &runtime_mod_path, + runtime_type, + trait_generic_arguments: &trait_generic_arguments, + impl_trait: &impl_trait.ident, + }; - result.push(processed_impl); + result.push(visitor.fold_item_impl(impl_.clone())); } Ok(quote!( #( #result )* )) } -fn populate_runtime_api_versions( - result: &mut Vec, - sections: &mut Vec, - attrs: Vec, - id: Path, - version: TokenStream, - crate_access: &TokenStream, -) { - result.push(quote!( - #( #attrs )* - (#id, #version) - )); - - sections.push(quote!( - #( #attrs )* - const _: () = { - // All sections with the same name are going to be merged by concatenation. - #[cfg(not(feature = "std"))] - #[link_section = "runtime_apis"] - static SECTION_CONTENTS: [u8; 12] = #crate_access::serialize_runtime_api_info(#id, #version); - }; - )); -} - /// Generates `RUNTIME_API_VERSIONS` that holds all version information about the implemented /// runtime apis. 
fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { - let mut result = Vec::::with_capacity(impls.len()); - let mut sections = Vec::::with_capacity(impls.len()); + let mut result = Vec::with_capacity(impls.len()); + let mut sections = Vec::with_capacity(impls.len()); let mut processed_traits = HashSet::new(); let c = generate_crate_access(HIDDEN_INCLUDES_ID); for impl_ in impls { - let api_ver = extract_api_version(&impl_.attrs, impl_.span())?.map(|a| a as u32); - let mut path = extend_with_runtime_decl_path( extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?.clone(), ); @@ -602,11 +655,23 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { } let id: Path = parse_quote!( #path ID ); - let version = quote!( #path VERSION ); + let version: Path = parse_quote!( #path VERSION ); let attrs = filter_cfg_attrs(&impl_.attrs); - let api_ver = api_ver.map(|a| quote!( #a )).unwrap_or_else(|| version); - populate_runtime_api_versions(&mut result, &mut sections, attrs, id, api_ver, &c) + result.push(quote!( + #( #attrs )* + (#id, #version) + )); + + sections.push(quote!( + #( #attrs )* + const _: () = { + // All sections with the same name are going to be merged by concatenation. + #[cfg(not(feature = "std"))] + #[link_section = "runtime_apis"] + static SECTION_CONTENTS: [u8; 12] = #c::serialize_runtime_api_info(#id, #version); + }; + )); } Ok(quote!( @@ -661,33 +726,6 @@ fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { attrs.iter().filter(|a| a.path.is_ident("cfg")).cloned().collect() } -// Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors. -// Returns: -// - Err if the version is malformed -// - Some(u64) if the version is set -// - None if the version is not set (this is valid). 
-fn extract_api_version(attrs: &Vec, span: Span) -> Result> { - // First fetch all `API_VERSION_ATTRIBUTE` values (should be only one) - let api_ver = attrs - .iter() - .filter(|a| a.path.is_ident(API_VERSION_ATTRIBUTE)) - .collect::>(); - - if api_ver.len() > 1 { - return Err(Error::new( - span, - format!( - "Found multiple #[{}] attributes for an API implementation. \ - Each runtime API can have only one version.", - API_VERSION_ATTRIBUTE - ), - )) - } - - // Parse the runtime version if there exists one. - api_ver.first().map(|v| parse_runtime_api_version(v)).transpose() -} - #[cfg(test)] mod tests { use super::*; diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 31636b8e2d545..20a2f76f2c83d 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -21,7 +21,6 @@ use proc_macro::TokenStream; -mod common; mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index e43a302e18923..6098f8d6bd741 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -18,7 +18,8 @@ use crate::utils::{ extract_block_type_from_trait_path, extract_impl_trait, extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, - return_type_extract_type, AllowSelfRefInParameters, RequireQualifiedTraitPath, + generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -30,7 +31,7 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, ItemImpl, Pat, Type, TypePath, + Attribute, Ident, ItemImpl, Pat, Type, TypePath, }; /// Unique identifier used to make the hidden includes unique for this macro. 
@@ -39,7 +40,7 @@ const HIDDEN_INCLUDES_ID: &str = "MOCK_IMPL_RUNTIME_APIS"; /// The `advanced` attribute. /// /// If this attribute is given to a function, the function gets access to the `BlockId` as first -/// parameter and needs to return a `Result` with the appropriate error type. +/// parameter and needs to return a `Result` with the appropiate error type. const ADVANCED_ATTRIBUTE: &str = "advanced"; /// The structure used for parsing the runtime api implementations. @@ -104,9 +105,7 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result Option<#crate_::StorageProof> { + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } @@ -127,63 +126,34 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result for #self_ty { - fn __runtime_api_internal_call_api_at( + fn Core_version_runtime_api_impl( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: std::vec::Vec, - _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, - ) -> std::result::Result, #crate_::ApiError> { - unimplemented!("`__runtime_api_internal_call_api_at` not implemented for runtime api mocks") + _: Option<()>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #crate_::ApiError> { + unimplemented!("Not required for testing!") } - fn version( - &self, - _: &#crate_::BlockId<#block_type>, - ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { - unimplemented!("`Core::version` not implemented for runtime api mocks") - } - - fn version_with_context( - &self, - _: &#crate_::BlockId<#block_type>, - _: #crate_::ExecutionContext, - ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { - unimplemented!("`Core::version` not implemented for runtime api mocks") - } - - fn execute_block( - &self, - _: &#crate_::BlockId<#block_type>, - _: #block_type, - ) -> std::result::Result<(), 
#crate_::ApiError> { - unimplemented!("`Core::execute_block` not implemented for runtime api mocks") - } - - fn execute_block_with_context( + fn Core_execute_block_runtime_api_impl( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: #block_type, - ) -> std::result::Result<(), #crate_::ApiError> { - unimplemented!("`Core::execute_block` not implemented for runtime api mocks") + _: Option<#block_type>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { + unimplemented!("Not required for testing!") } - fn initialize_block( - &self, - _: &#crate_::BlockId<#block_type>, - _: &<#block_type as #crate_::BlockT>::Header, - ) -> std::result::Result<(), #crate_::ApiError> { - unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") - } - - fn initialize_block_with_context( + fn Core_initialize_block_runtime_api_impl( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: &<#block_type as #crate_::BlockT>::Header, - ) -> std::result::Result<(), #crate_::ApiError> { - unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") + _: Option<&<#block_type as #crate_::BlockT>::Header>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { + unimplemented!("Not required for testing!") } } )) @@ -246,53 +216,14 @@ fn get_at_param_name( } } -/// Auxiliary structure to fold a runtime api trait implementation into the expected format. +/// Auxialiry structure to fold a runtime api trait implementation into the expected format. /// /// This renames the methods, changes the method parameters and extracts the error type. struct FoldRuntimeApiImpl<'a> { /// The block type that is being used. block_type: &'a TypePath, -} - -impl<'a> FoldRuntimeApiImpl<'a> { - /// Process the given [`syn::ItemImpl`]. 
- fn process(mut self, impl_item: syn::ItemImpl) -> syn::ItemImpl { - let mut impl_item = self.fold_item_impl(impl_item); - - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // We also need to overwrite all the `_with_context` methods. To do this, - // we clone all methods and add them again with the new name plus one more argument. - impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| { - if let syn::ImplItem::Method(mut m) = i { - m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident); - m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext )); - - Some(m.into()) - } else { - None - } - })); - - let block_type = self.block_type; - - impl_item.items.push(parse_quote! { - fn __runtime_api_internal_call_api_at( - &self, - _: &#crate_::BlockId<#block_type>, - _: #crate_::ExecutionContext, - _: std::vec::Vec, - _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, - ) -> std::result::Result, #crate_::ApiError> { - unimplemented!( - "`__runtime_api_internal_call_api_at` not implemented for runtime api mocks. \ - Calling deprecated methods is not supported by mocked runtime api." - ) - } - }); - - impl_item - } + /// The identifier of the trait being implemented. + impl_trait: &'a Ident, } impl<'a> Fold for FoldRuntimeApiImpl<'a> { @@ -346,9 +277,14 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { input.sig.inputs = parse_quote! { &self, #at_param_name: #block_id_type, - #( #param_names: #param_types ),* + _: #crate_::ExecutionContext, + ___params___sp___api___: Option<( #( #param_types ),* )>, + _: Vec, }; + input.sig.ident = + generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); + // When using advanced, the user needs to declare the correct return type on its own, // otherwise do it for the user. if !is_advanced { @@ -356,7 +292,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Generate the correct return type. 
input.sig.output = parse_quote!( - -> std::result::Result<#ret_type, #crate_::ApiError> + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> ); } @@ -368,7 +304,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { quote! { let __fn_implementation__ = move || #orig_block; - Ok(__fn_implementation__()) + Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) } }; @@ -378,6 +314,9 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Get the error to the user (if we have one). #( #errors )* + let (#( #param_names ),*) = ___params___sp___api___ + .expect("Mocked runtime apis don't support calling deprecated api versions"); + #construct_return_value } ) @@ -412,6 +351,11 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Result Some(block_type.clone()), }; - result.push(FoldRuntimeApiImpl { block_type }.process(impl_.clone())); + let mut visitor = FoldRuntimeApiImpl { block_type, impl_trait: &impl_trait.ident }; + + result.push(visitor.fold_item_impl(impl_.clone())); } Ok(GeneratedRuntimeApiImpls { diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 2ccd050cfb151..97b456b62dfa6 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -18,18 +18,16 @@ use proc_macro2::{Span, TokenStream}; use syn::{ - parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident, - ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, + parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem, + ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; -use quote::{format_ident, quote}; +use quote::quote; use std::env; use proc_macro_crate::{crate_name, FoundCrate}; -use crate::common::API_VERSION_ATTRIBUTE; - fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident { Ident::new(&format!("sp_api_hidden_includes_{}", 
unique_id), Span::call_site()) } @@ -70,6 +68,11 @@ pub fn generate_runtime_mod_name_for_trait(trait_: &Ident) -> Ident { Ident::new(&format!("runtime_decl_for_{}", trait_), Span::call_site()) } +/// Generates a name for a method that needs to be implemented in the runtime for the client side. +pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> Ident { + Ident::new(&format!("{}_{}_runtime_api_impl", trait_, method), Span::call_site()) +} + /// Get the type of a `syn::ReturnType`. pub fn return_type_extract_type(rt: &ReturnType) -> Type { match rt { @@ -163,6 +166,16 @@ pub fn extract_parameter_names_types_and_borrows( Ok(result) } +/// Generates the name for the native call generator function. +pub fn generate_native_call_generator_fn_name(fn_name: &Ident) -> Ident { + Ident::new(&format!("{}_native_call_generator", fn_name), Span::call_site()) +} + +/// Generates the name for the call api at function. +pub fn generate_call_api_at_fn_name(fn_name: &Ident) -> Ident { + Ident::new(&format!("{}_call_api_at", fn_name), Span::call_site()) +} + /// Prefix the given function with the trait name. pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> String { format!("{}_{}", trait_, function.to_string()) @@ -254,23 +267,3 @@ pub fn extract_impl_trait(impl_: &ItemImpl, require: RequireQualifiedTraitPath) } }) } - -/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. -pub fn parse_runtime_api_version(version: &Attribute) -> Result { - let version = version.parse_args::().map_err(|_| { - Error::new( - version.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ), - ) - })?; - - version.base10_parse() -} - -// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g. 
ParachainHostV2 -pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { - format_ident!("{}V{}", trait_ident, version) -} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 91d4b07a1cefc..2635c81948ff3 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -78,10 +78,12 @@ pub use hash_db::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; +#[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::NativeOrEncoded; use sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; -#[doc(hidden)] #[cfg(feature = "std")] pub use sp_runtime::StateVersion; #[doc(hidden)] @@ -97,15 +99,16 @@ pub use sp_runtime::{ #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - backend::AsTrieBackend, Backend as StateBackend, InMemoryBackend, OverlayedChanges, - StorageProof, TrieBackend, TrieBackendBuilder, + Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, }; +#[cfg(feature = "std")] +use sp_std::result; #[doc(hidden)] pub use sp_std::{mem, slice}; #[doc(hidden)] pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(feature = "std")] -use std::cell::RefCell; +use std::{cell::RefCell, panic::UnwindSafe}; /// Maximum nesting level for extrinsics. pub const MAX_EXTRINSIC_DEPTH: u32 = 256; @@ -184,56 +187,6 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the /// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` /// to check if the runtime at the given block id implements the requested runtime api trait. -/// -/// # Declaring multiple api versions -/// -/// Optionally multiple versions of the same api can be declared. This is useful for -/// development purposes. For example you want to have a testing version of the api which is -/// available only on a testnet. 
You can define one stable and one development version. This -/// can be done like this: -/// ```rust -/// sp_api::decl_runtime_apis! { -/// /// Declare the api trait. -/// #[api_version(2)] -/// pub trait Balance { -/// /// Get the balance. -/// fn get_balance() -> u64; -/// /// Set the balance. -/// fn set_balance(val: u64); -/// /// Transfer the balance to another user id -/// #[api_version(3)] -/// fn transfer_balance(uid: u64); -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// The example above defines two api versions - 2 and 3. Version 2 contains `get_balance` and -/// `set_balance`. Version 3 additionally contains `transfer_balance`, which is not available -/// in version 2. Version 2 in this case is considered the default/base version of the api. -/// More than two versions can be defined this way. For example: -/// ```rust -/// sp_api::decl_runtime_apis! { -/// /// Declare the api trait. -/// #[api_version(2)] -/// pub trait Balance { -/// /// Get the balance. -/// fn get_balance() -> u64; -/// /// Set the balance. -/// fn set_balance(val: u64); -/// /// Transfer the balance to another user id -/// #[api_version(3)] -/// fn transfer_balance(uid: u64); -/// /// Clears the balance -/// #[api_version(4)] -/// fn clear_balance(); -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// Note that the latest version (4 in our example above) always contains all methods from all -/// the versions before. pub use sp_api_proc_macro::decl_runtime_apis; /// Tags given trait implementations as runtime apis. @@ -323,22 +276,6 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// /// # fn main() {} /// ``` -/// -/// # Implementing specific api version -/// -/// If `decl_runtime_apis!` declares multiple versions for an api `impl_runtime_apis!` -/// should specify which version it implements by adding `api_version` attribute to the -/// `impl` block. If omitted - the base/default version is implemented. Here is an example: -/// ```ignore -/// sp_api::impl_runtime_apis! 
{ -/// #[api_version(3)] -/// impl self::Balance for Runtime { -/// // implementation -/// } -/// } -/// ``` -/// In this case `Balance` api version 3 is being implemented for `Runtime`. The `impl` block -/// must contain all methods declared in version 3 and below. pub use sp_api_proc_macro::impl_runtime_apis; /// Mocks given trait implementations as runtime apis. @@ -404,13 +341,15 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// using the `advanced` attribute, the macro expects that the first parameter of the function /// is this `at` parameter. Besides that the macro also doesn't do the automatic return value /// rewrite, which means that full return value must be specified. The full return value is -/// constructed like [`Result`]`<, Error>` while `ReturnValue` being the return -/// value that is specified in the trait declaration. +/// constructed like [`Result`]`<`[`NativeOrEncoded`](sp_api::NativeOrEncoded)`, +/// Error>` while `ReturnValue` being the return value that is specified in the trait +/// declaration. /// /// ## Example /// ```rust /// # use sp_runtime::{traits::Block as BlockT, generic::BlockId}; /// # use sp_test_primitives::Block; +/// # use sp_core::NativeOrEncoded; /// # use codec; /// # /// # sp_api::decl_runtime_apis! { @@ -429,13 +368,13 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// sp_api::mock_impl_runtime_apis! 
{ /// impl Balance for MockApi { /// #[advanced] -/// fn get_balance(&self, at: &BlockId) -> Result { +/// fn get_balance(&self, at: &BlockId) -> Result, sp_api::ApiError> { /// println!("Being called at: {}", at); /// /// Ok(self.balance.into()) /// } /// #[advanced] -/// fn set_balance(at: &BlockId, val: u64) -> Result<(), sp_api::ApiError> { +/// fn set_balance(at: &BlockId, val: u64) -> Result, sp_api::ApiError> { /// if let BlockId::Number(1) = at { /// println!("Being called to set balance to: {}", val); /// } @@ -451,7 +390,7 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub type ProofRecorder = sp_trie::recorder::Recorder>; +pub type ProofRecorder = sp_state_machine::ProofRecorder<::Hash>; /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] @@ -515,8 +454,6 @@ pub enum ApiError { #[source] error: codec::Error, }, - #[error("The given `StateBackend` isn't a `TrieBackend`.")] - StateBackendIsNotTrie, #[error(transparent)] Application(#[from] Box), } @@ -586,11 +523,16 @@ pub trait ApiExt { /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] -pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend>> { +pub struct CallApiAtParams<'a, Block: BlockT, NC, Backend: StateBackend>> { /// The block id that determines the state that should be setup when calling the function. pub at: &'a BlockId, /// The name of the function that should be called. pub function: &'static str, + /// An optional native call that calls the `function`. This is an optimization to call into a + /// native runtime without requiring to encode/decode the parameters. The native runtime can + /// still be called when this value is `None`, we then just fallback to encoding/decoding the + /// parameters. + pub native_call: Option, /// The encoded arguments of the function. 
pub arguments: Vec, /// The overlayed changes that are on top of the state. @@ -607,20 +549,20 @@ pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend { /// The state backend that is used to store the block states. - type StateBackend: StateBackend> + AsTrieBackend>; + type StateBackend: StateBackend>; /// Calls the given api function with the given encoded arguments at the given block and returns /// the encoded result. - fn call_api_at( + fn call_api_at< + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( &self, - params: CallApiAtParams, - ) -> Result, ApiError>; + params: CallApiAtParams, + ) -> Result, ApiError>; /// Returns the runtime version at the given block. fn runtime_version_at(&self, at: &BlockId) -> Result; - - /// Get the state `at` the given block. - fn state_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 2445a5c07f09e..2682c91f94106 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -27,14 +27,14 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| runtime_api.benchmark_add_one(&block_id, &1)) }); c.bench_function("add one with recreating runtime api", |b| { let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) }); @@ -42,7 +42,7 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("vector add one with same runtime 
api", |b| { let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); let data = vec![0; 1000]; b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) @@ -50,7 +50,7 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("vector add one with recreating runtime api", |b| { let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); let data = vec![0; 1000]; b.iter_with_large_drop(|| client.runtime_api().benchmark_vector_add_one(&block_id, &data)) @@ -60,7 +60,7 @@ fn sp_api_benchmark(c: &mut Criterion) { let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .build(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) }); @@ -68,7 +68,7 @@ fn sp_api_benchmark(c: &mut Criterion) { let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .build(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_direct_call(&block_id).unwrap()) }); } diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index 42628830cc7fa..1db416a1d3db6 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -18,6 +18,7 @@ use sp_api::{ decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt, RuntimeApiInfo, }; +use sp_core::NativeOrEncoded; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, GetNodeBlockType}, @@ 
-46,15 +47,6 @@ decl_runtime_apis! { #[changed_in(2)] fn same_name() -> String; } - - #[api_version(2)] - pub trait ApiWithMultipleVersions { - fn stable_one(data: u64); - #[api_version(3)] - fn new_one(); - #[api_version(4)] - fn glory_one(); - } } impl_runtime_apis! { @@ -80,13 +72,6 @@ impl_runtime_apis! { fn same_name() {} } - #[api_version(3)] - impl self::ApiWithMultipleVersions for Runtime { - fn stable_one(_: u64) {} - - fn new_one() {} - } - impl sp_api::Core for Runtime { fn version() -> sp_version::RuntimeVersion { unimplemented!() @@ -119,12 +104,22 @@ mock_impl_runtime_apis! { } #[advanced] - fn same_name(_: &BlockId) -> Result<(), ApiError> { + fn same_name(_: &BlockId) -> + Result< + NativeOrEncoded<()>, + ApiError + > + { Ok(().into()) } #[advanced] - fn wild_card(at: &BlockId, _: u32) -> Result<(), ApiError> { + fn wild_card(at: &BlockId, _: u32) -> + Result< + NativeOrEncoded<()>, + ApiError + > + { if let BlockId::Number(1337) = at { // yeah Ok(().into()) @@ -181,9 +176,6 @@ fn check_runtime_api_info() { &runtime_decl_for_ApiWithCustomVersion::ID, ); assert_eq!(>::VERSION, 2); - - // The stable version of the API - assert_eq!(>::VERSION, 2); } fn check_runtime_api_versions_contains() { @@ -194,9 +186,6 @@ fn check_runtime_api_versions_contains() { fn check_runtime_api_versions() { check_runtime_api_versions_contains::>(); check_runtime_api_versions_contains::>(); - assert!(RUNTIME_API_VERSIONS - .iter() - .any(|v| v == &(>::ID, 3))); check_runtime_api_versions_contains::>(); } @@ -209,7 +198,7 @@ fn mock_runtime_api_has_api() { } #[test] -#[should_panic(expected = "Calling deprecated methods is not supported by mocked runtime api.")] +#[should_panic(expected = "Mocked runtime apis don't support calling deprecated api versions")] fn mock_runtime_api_panics_on_calling_old_version() { let mock = MockApi { block: None }; diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 
2ac88c7e6c04f..ba42b342377c7 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -25,7 +25,7 @@ use sp_state_machine::{ }; use substrate_test_runtime_client::{ prelude::*, - runtime::{Block, Header, TestAPI, Transfer}, + runtime::{Block, DecodeFails, Header, TestAPI, Transfer}, DefaultTestClientBuilderExt, TestClientBuilder, }; @@ -51,6 +51,28 @@ fn calling_wasm_runtime_function() { calling_function_with_strat(ExecutionStrategy::AlwaysWasm); } +#[test] +#[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")] +fn calling_native_runtime_function_with_non_decodable_parameter() { + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + runtime_api.fail_convert_parameter(&block_id, DecodeFails::default()).unwrap(); +} + +#[test] +#[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] +fn calling_native_runtime_function_with_non_decodable_return_value() { + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + runtime_api.fail_convert_return_value(&block_id).unwrap(); +} + #[test] fn calling_native_runtime_signature_changed_function() { let client = TestClientBuilder::new() @@ -148,8 +170,10 @@ fn record_proof_works() { .build_with_longest_chain(); let block_id = BlockId::Number(client.chain_info().best_number); - let storage_root = - *futures::executor::block_on(longest_chain.best_chain()).unwrap().state_root(); + let storage_root = futures::executor::block_on(longest_chain.best_chain()) + .unwrap() + .state_root() + .clone(); let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: 
&sp_core::traits::WrappedRuntimeCode( diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 13af1ded7dc6b..f3d6aa59a0336 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -30,5 +30,4 @@ fn ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); - t.pass("tests/ui/positive_cases/*.rs"); } diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index f5b6ac1da4576..b1478e2f53344 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -15,21 +15,51 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(std::string::String)` +error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait + --> tests/ui/impl_incorrect_method_signature.rs:17:1 + | +17 | sp_api::impl_runtime_apis! { + | -^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found struct `std::string::String` + | | +18 | | impl self::Api for Runtime { +19 | | fn test(data: String) {} +20 | | } +... | +32 | | } +33 | | } + | |_- help: change the parameter type to match the trait: `std::option::Option` + | +note: type in trait + --> tests/ui/impl_incorrect_method_signature.rs:11:1 + | +11 | / sp_api::decl_runtime_apis! 
{ +12 | | pub trait Api { +13 | | fn test(data: u64); +14 | | } +15 | | } + | |_^ + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0308]: mismatched types - --> tests/ui/impl_incorrect_method_signature.rs:19:11 + --> tests/ui/impl_incorrect_method_signature.rs:17:1 | 17 | / sp_api::impl_runtime_apis! { 18 | | impl self::Api for Runtime { 19 | | fn test(data: String) {} - | | ^^^^ expected `u64`, found struct `std::string::String` 20 | | } ... | 32 | | } 33 | | } - | |_- arguments to this function are incorrect + | |_^ expected `u64`, found struct `std::string::String` | -note: associated function defined here - --> tests/ui/impl_incorrect_method_signature.rs:13:6 + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui/impl_incorrect_method_signature.rs:19:11 | -13 | fn test(data: u64); - | ^^^^ +19 | fn test(data: String) {} + | ^^^^ expected `u64`, found struct `std::string::String` diff --git a/primitives/api/test/tests/ui/impl_missing_version.rs b/primitives/api/test/tests/ui/impl_missing_version.rs deleted file mode 100644 index 63e0599622ac9..0000000000000 --- a/primitives/api/test/tests/ui/impl_missing_version.rs +++ /dev/null @@ -1,40 +0,0 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; -use substrate_test_runtime_client::runtime::Block; - -struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} - -sp_api::decl_runtime_apis! 
{ - #[api_version(2)] - pub trait Api { - fn test1(); - fn test2(); - #[api_version(3)] - fn test3(); - } -} - -sp_api::impl_runtime_apis! { - #[api_version(4)] - impl self::Api for Runtime { - fn test1() {} - fn test2() {} - fn test3() {} - } - - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() - } - fn initialize_block(_: &::Header) { - unimplemented!() - } - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/impl_missing_version.stderr b/primitives/api/test/tests/ui/impl_missing_version.stderr deleted file mode 100644 index c0abeffe0cccf..0000000000000 --- a/primitives/api/test/tests/ui/impl_missing_version.stderr +++ /dev/null @@ -1,14 +0,0 @@ -error[E0433]: failed to resolve: could not find `ApiV4` in `runtime_decl_for_Api` - --> tests/ui/impl_missing_version.rs:21:13 - | -21 | impl self::Api for Runtime { - | ^^^ could not find `ApiV4` in `runtime_decl_for_Api` - -error[E0405]: cannot find trait `ApiV4` in module `self::runtime_decl_for_Api` - --> tests/ui/impl_missing_version.rs:21:13 - | -11 | pub trait Api { - | ------------- similarly named trait `ApiV2` defined here -... 
-21 | impl self::Api for Runtime { - | ^^^ help: a trait with a similar name exists: `ApiV2` diff --git a/primitives/api/test/tests/ui/invalid_api_version_1.rs b/primitives/api/test/tests/ui/invalid_api_version.rs similarity index 100% rename from primitives/api/test/tests/ui/invalid_api_version_1.rs rename to primitives/api/test/tests/ui/invalid_api_version.rs diff --git a/primitives/api/test/tests/ui/invalid_api_version_1.stderr b/primitives/api/test/tests/ui/invalid_api_version.stderr similarity index 65% rename from primitives/api/test/tests/ui/invalid_api_version_1.stderr rename to primitives/api/test/tests/ui/invalid_api_version.stderr index 53ffce959bb66..7770bc70e72d6 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_1.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> tests/ui/invalid_api_version_1.rs:2:2 + --> $DIR/invalid_api_version.rs:2:4 | 2 | #[api_version] - | ^ + | ^^^^^^^^^^^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_2.stderr b/primitives/api/test/tests/ui/invalid_api_version_2.stderr index 0c5274d4680ff..7ca6a7eebe49c 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_2.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_2.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> tests/ui/invalid_api_version_2.rs:2:2 + --> $DIR/invalid_api_version_2.rs:2:4 | 2 | #[api_version("1")] - | ^ + | ^^^^^^^^^^^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_3.stderr b/primitives/api/test/tests/ui/invalid_api_version_3.stderr index 4a34a7aa9b47a..cef4763a6de96 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_3.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_3.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. 
The supported format is `api_version(1)` - --> tests/ui/invalid_api_version_3.rs:2:2 + --> $DIR/invalid_api_version_3.rs:2:4 | 2 | #[api_version()] - | ^ + | ^^^^^^^^^^^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.rs b/primitives/api/test/tests/ui/invalid_api_version_4.rs deleted file mode 100644 index 37b5b6ffa25d1..0000000000000 --- a/primitives/api/test/tests/ui/invalid_api_version_4.rs +++ /dev/null @@ -1,8 +0,0 @@ -sp_api::decl_runtime_apis! { - pub trait Api { - #[api_version("1")] - fn test(data: u64); - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.stderr b/primitives/api/test/tests/ui/invalid_api_version_4.stderr deleted file mode 100644 index 57541a97f6c91..0000000000000 --- a/primitives/api/test/tests/ui/invalid_api_version_4.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> tests/ui/invalid_api_version_4.rs:3:3 - | -3 | #[api_version("1")] - | ^ diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs deleted file mode 100644 index b4f43cd401bba..0000000000000 --- a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs +++ /dev/null @@ -1,9 +0,0 @@ -sp_api::decl_runtime_apis! { - #[api_version(2)] - pub trait Api { - #[api_version(1)] - fn test(data: u64); - } -} - -fn main() {} \ No newline at end of file diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr deleted file mode 100644 index ec4b594023a05..0000000000000 --- a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: Method version `1` is older than (or equal to) trait version `2`.Methods can't define versions older than the trait version. 
- --> tests/ui/method_ver_lower_than_trait_ver.rs:4:3 - | -4 | #[api_version(1)] - | ^ - -error: Trait version is set here. - --> tests/ui/method_ver_lower_than_trait_ver.rs:2:2 - | -2 | #[api_version(2)] - | ^ diff --git a/primitives/api/test/tests/ui/missing_versioned_method.rs b/primitives/api/test/tests/ui/missing_versioned_method.rs deleted file mode 100644 index d973a94c2101d..0000000000000 --- a/primitives/api/test/tests/ui/missing_versioned_method.rs +++ /dev/null @@ -1,39 +0,0 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; -use substrate_test_runtime_client::runtime::Block; - -struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} - -sp_api::decl_runtime_apis! { - #[api_version(2)] - pub trait Api { - fn test1(); - fn test2(); - #[api_version(3)] - fn test3(); - } -} - -sp_api::impl_runtime_apis! { - #[api_version(3)] - impl self::Api for Runtime { - fn test1() {} - fn test2() {} - } - - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() - } - fn initialize_block(_: &::Header) { - unimplemented!() - } - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method.stderr b/primitives/api/test/tests/ui/missing_versioned_method.stderr deleted file mode 100644 index e3ace7979c27e..0000000000000 --- a/primitives/api/test/tests/ui/missing_versioned_method.stderr +++ /dev/null @@ -1,8 +0,0 @@ -error[E0046]: not all trait items implemented, missing: `test3` - --> tests/ui/missing_versioned_method.rs:21:2 - | -15 | fn test3(); - | ----------- `test3` from trait -... 
-21 | impl self::Api for Runtime { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs deleted file mode 100644 index 72358b99164d5..0000000000000 --- a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs +++ /dev/null @@ -1,42 +0,0 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; -use substrate_test_runtime_client::runtime::Block; - -struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} - -sp_api::decl_runtime_apis! { - #[api_version(2)] - pub trait Api { - fn test1(); - fn test2(); - #[api_version(3)] - fn test3(); - #[api_version(4)] - fn test4(); - } -} - -sp_api::impl_runtime_apis! { - #[api_version(4)] - impl self::Api for Runtime { - fn test1() {} - fn test2() {} - fn test4() {} - } - - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() - } - fn initialize_block(_: &::Header) { - unimplemented!() - } - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr deleted file mode 100644 index 7354fbd537fd7..0000000000000 --- a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr +++ /dev/null @@ -1,8 +0,0 @@ -error[E0046]: not all trait items implemented, missing: `test3` - --> tests/ui/missing_versioned_method_multiple_vers.rs:23:2 - | -15 | fn test3(); - | ----------- `test3` from trait -... 
-23 | impl self::Api for Runtime { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index aeef40f4c77d6..fd654ffdc63d6 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self, _: BlockId) -> Result<(), ApiError> { + fn test(&self, _: BlockId) -> Result, ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index 3b3c4e94c3121..befe67c1d0b4a 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -1,10 +1,10 @@ error: `BlockId` needs to be taken by reference and not by value! - --> tests/ui/mock_advanced_block_id_by_value.rs:12:1 + --> $DIR/mock_advanced_block_id_by_value.rs:12:1 | 12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | #[advanced] -15 | | fn test(&self, _: BlockId) -> Result<(), ApiError> { +15 | | fn test(&self, _: BlockId) -> Result, ApiError> { ... | 18 | | } 19 | | } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index 76bf5f1aa7459..a15ef133fa6c4 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! 
{ impl Api for MockApi { #[advanced] - fn test(&self) -> Result<(), ApiError> { + fn test(&self) -> Result, ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr index b9ce7324b5caa..87d3660316b1e 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -1,5 +1,5 @@ error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. - --> tests/ui/mock_advanced_missing_blockid.rs:15:3 + --> $DIR/mock_advanced_missing_blockid.rs:15:3 | -15 | fn test(&self) -> Result<(), ApiError> { +15 | fn test(&self) -> Result, ApiError> { | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 430f63eee1660..c67de70b9c140 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -10,80 +10,62 @@ error: Only `&self` is supported! 16 | fn test2(&mut self, data: u64) {} | ^ -error[E0050]: method `test` has 2 parameters but the declaration in trait `Api::test` has 3 +error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait --> tests/ui/mock_only_self_reference.rs:12:1 | -3 | / sp_api::decl_runtime_apis! { -4 | | pub trait Api { -5 | | fn test(data: u64); - | |_________________________- trait requires 3 parameters -... -12 | / sp_api::mock_impl_runtime_apis! { +12 | sp_api::mock_impl_runtime_apis! 
{ + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_^ expected 3 parameters, found 2 + | |_- help: change the parameter type to match the trait: `Option` | - = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api::test2` has 3 - --> tests/ui/mock_only_self_reference.rs:12:1 +note: type in trait + --> tests/ui/mock_only_self_reference.rs:3:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); - | |__________________________- trait requires 3 parameters -... -12 | / sp_api::mock_impl_runtime_apis! { -13 | | impl Api for MockApi { -14 | | fn test(self, data: u64) {} -15 | | -16 | | fn test2(&mut self, data: u64) {} -17 | | } -18 | | } - | |_^ expected 3 parameters, found 2 - | +7 | | } +8 | | } + | |_^ + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0050]: method `test_with_context` has 3 parameters but the declaration in trait `Api::test_with_context` has 4 +error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait --> tests/ui/mock_only_self_reference.rs:12:1 | -3 | / sp_api::decl_runtime_apis! { -4 | | pub trait Api { -5 | | fn test(data: u64); - | |_________________________- trait requires 4 parameters -... -12 | / sp_api::mock_impl_runtime_apis! { +12 | sp_api::mock_impl_runtime_apis! 
{ + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_^ expected 4 parameters, found 3 + | |_- help: change the parameter type to match the trait: `Option` | - = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0050]: method `test2_with_context` has 3 parameters but the declaration in trait `Api::test2_with_context` has 4 - --> tests/ui/mock_only_self_reference.rs:12:1 +note: type in trait + --> tests/ui/mock_only_self_reference.rs:3:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); - | |__________________________- trait requires 4 parameters -... -12 | / sp_api::mock_impl_runtime_apis! { -13 | | impl Api for MockApi { -14 | | fn test(self, data: u64) {} -15 | | -16 | | fn test2(&mut self, data: u64) {} -17 | | } -18 | | } - | |_^ expected 4 parameters, found 3 - | +7 | | } +8 | | } + | |_^ + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/primitives/api/test/tests/ui/positive_cases/default_impls.rs deleted file mode 100644 index 3434db1089f05..0000000000000 --- a/primitives/api/test/tests/ui/positive_cases/default_impls.rs +++ /dev/null @@ -1,41 +0,0 @@ -use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; -use substrate_test_runtime_client::runtime::Block; - -struct Runtime {} -impl GetNodeBlockType for Runtime { - type NodeBlock = Block; -} - 
-sp_api::decl_runtime_apis! { - #[api_version(2)] - pub trait Api { - fn test1(); - fn test2(); - #[api_version(3)] - fn test3(); - #[api_version(4)] - fn test4(); - } -} - -sp_api::impl_runtime_apis! { - #[api_version(2)] - impl self::Api for Runtime { - fn test1() {} - fn test2() {} - } - - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() - } - fn initialize_block(_: &::Header) { - unimplemented!() - } - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 06f8226ec88bf..dbc0f6def3aa5 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -15,25 +15,57 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(&u64)` +error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 + | +17 | sp_api::impl_runtime_apis! { + | -^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `&u64` + | | +18 | | impl self::Api for Runtime { +19 | | fn test(data: &u64) { +20 | | unimplemented!() +... | +34 | | } +35 | | } + | |_- help: change the parameter type to match the trait: `std::option::Option` + | +note: type in trait + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:11:1 + | +11 | / sp_api::decl_runtime_apis! 
{ +12 | | pub trait Api { +13 | | fn test(data: u64); +14 | | } +15 | | } + | |_^ + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0308]: mismatched types - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 | 17 | / sp_api::impl_runtime_apis! { 18 | | impl self::Api for Runtime { 19 | | fn test(data: &u64) { - | | ^^^^^^^ expected `u64`, found `&u64` 20 | | unimplemented!() ... | 34 | | } 35 | | } - | |_- arguments to this function are incorrect + | |_^ expected `u64`, found `&u64` | -note: associated function defined here - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:13:6 + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 + | +19 | fn test(data: &u64) { + | ^^^^^^^ expected `u64`, found `&u64` | -13 | fn test(data: u64); - | ^^^^ help: consider removing the borrow | -19 | fn test(data: &u64) { +19 - fn test(data: &u64) { +19 + fn test(data: &u64) { | diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 60eac2247e830..d7046b3254699 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] criterion = "0.3" -primitive-types = "0.12.0" +primitive-types = "0.11.1" sp-core = { version = 
"6.0.0", features = ["full_crypto"], path = "../core" } rand = "0.7.2" diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index f39e59034dcd0..990106f990323 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5.49" -num-bigint = "0.4.3" -primitive-types = "0.12.0" +num-bigint = "0.2" +primitive-types = "0.11.1" sp-arithmetic = { version = "5.0.0", path = ".." } [[bin]] diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 3a8cb3f37cbd3..714d0a2610312 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.50", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index 22e41b5130abb..320a30770ab42 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -14,18 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, 
path = "../core" } -sp-io = { version = "6.0.0", default-features = false, path = "../io" } -sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../merkle-mountain-range" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] -array-bytes = "4.1" +hex = "0.4.3" +hex-literal = "0.3" sp-keystore = { version = "0.12.0", path = "../keystore" } [features] @@ -33,12 +31,9 @@ default = ["std"] std = [ "codec/std", "scale-info/std", - "serde", "sp-api/std", "sp-application-crypto/std", "sp-core/std", - "sp-io/std", - "sp-mmr-primitives/std", "sp-runtime/std", "sp-std/std", ] diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index 5765ff3609dbb..ddf58474e77a0 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -16,10 +16,63 @@ // limitations under the License. use codec::{Decode, Encode, Error, Input}; -use scale_info::TypeInfo; use sp_std::{cmp, prelude::*}; -use crate::{Payload, ValidatorSetId}; +use crate::ValidatorSetId; + +/// Id of different payloads in the [`Commitment`] data +pub type BeefyPayloadId = [u8; 2]; + +/// Registry of all known [`BeefyPayloadId`]. +pub mod known_payload_ids { + use crate::BeefyPayloadId; + + /// A [`Payload`](super::Payload) identifier for Merkle Mountain Range root hash. + /// + /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). + pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; +} + +/// A BEEFY payload type allowing for future extensibility of adding additional kinds of payloads. +/// +/// The idea is to store a vector of SCALE-encoded values with an extra identifier. +/// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected +/// value. Duplicated identifiers are disallowed. 
It's okay for different implementations to only +/// support a subset of possible values. +#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash)] +pub struct Payload(Vec<(BeefyPayloadId, Vec)>); + +impl Payload { + /// Construct a new payload given an initial vallue + pub fn new(id: BeefyPayloadId, value: Vec) -> Self { + Self(vec![(id, value)]) + } + + /// Returns a raw payload under given `id`. + /// + /// If the [`BeefyPayloadId`] is not found in the payload `None` is returned. + pub fn get_raw(&self, id: &BeefyPayloadId) -> Option<&Vec> { + let index = self.0.binary_search_by(|probe| probe.0.cmp(id)).ok()?; + Some(&self.0[index].1) + } + + /// Returns a decoded payload value under given `id`. + /// + /// In case the value is not there or it cannot be decoded does not match `None` is returned. + pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { + self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) + } + + /// Push a `Vec` with a given id into the payload vec. + /// This method will internally sort the payload vec after every push. + /// + /// Returns self to allow for daisy chaining. + pub fn push_raw(mut self, id: BeefyPayloadId, value: Vec) -> Self { + self.0.push((id, value)); + self.0.sort_by_key(|(id, _)| *id); + self + } +} /// A commitment signed by GRANDPA validators as part of BEEFY protocol. /// @@ -27,7 +80,7 @@ use crate::{Payload, ValidatorSetId}; /// height [block_number](Commitment::block_number). /// GRANDPA validators collect signatures on commitments and a stream of such signed commitments /// (see [SignedCommitment]) forms the BEEFY protocol. -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo)] +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] pub struct Commitment { /// A collection of payloads to be signed, see [`Payload`] for details. 
/// @@ -85,7 +138,7 @@ where /// Note that SCALE-encoding of the structure is optimized for size efficiency over the wire, /// please take a look at custom [`Encode`] and [`Decode`] implementations and /// `CompactSignedCommitment` struct. -#[derive(Clone, Debug, PartialEq, Eq, TypeInfo)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct SignedCommitment { /// The commitment signatures are collected for. pub commitment: Commitment, @@ -248,18 +301,19 @@ impl From> for VersionedFinalityProof { #[cfg(test)] mod tests { - use super::*; - use crate::{crypto, known_payloads, KEY_TYPE}; - use codec::Decode; + use sp_core::{keccak_256, Pair}; use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; + use super::*; + use codec::Decode; + + use crate::{crypto, KEY_TYPE}; + type TestCommitment = Commitment; type TestSignedCommitment = SignedCommitment; type TestVersionedFinalityProof = VersionedFinalityProof; - const LARGE_RAW_COMMITMENT: &[u8] = include_bytes!("../test-res/large-raw-commitment"); - // The mock signatures are equivalent to the ones produced by the BEEFY keystore fn mock_signatures() -> (crypto::Signature, crypto::Signature) { let store: SyncCryptoStorePtr = KeyStore::new().into(); @@ -285,8 +339,7 @@ mod tests { #[test] fn commitment_encode_decode() { // given - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -298,7 +351,7 @@ mod tests { assert_eq!(decoded, Ok(commitment)); assert_eq!( encoded, - array_bytes::hex2bytes_unchecked( + hex_literal::hex!( "046d68343048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000" ) ); @@ -307,8 +360,7 @@ mod tests { #[test] fn signed_commitment_encode_decode() { // given - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello 
World!".encode()); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -327,14 +379,12 @@ mod tests { assert_eq!(decoded, Ok(signed)); assert_eq!( encoded, - array_bytes::hex2bytes_unchecked( - "\ - 046d68343048656c6c6f20576f726c64210500000000000000000000000000000000000000000000000\ - 4300400000008558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746c\ - c321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba012d6e1f8105c337a86cdd9aa\ - acdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b6\ - a0046395a71681be3d0c2a00\ - " + hex_literal::hex!( + "046d68343048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000 + 04300400000008558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746 + cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba012d6e1f8105c337a86cdd9a + aacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b + 6a0046395a71681be3d0c2a00" ) ); } @@ -342,8 +392,7 @@ mod tests { #[test] fn signed_commitment_count_signatures() { // given - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -368,8 +417,7 @@ mod tests { block_number: u128, validator_set_id: crate::ValidatorSetId, ) -> TestCommitment { - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); Commitment { payload, block_number, validator_set_id } } @@ -389,8 +437,7 @@ mod tests { #[test] fn versioned_commitment_encode_decode() { - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, 
"Hello World!".encode()); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -416,8 +463,7 @@ mod tests { #[test] fn large_signed_commitment_encode_decode() { // given - let payload = - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); + let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -435,6 +481,56 @@ mod tests { // then assert_eq!(decoded, Ok(signed)); - assert_eq!(encoded, LARGE_RAW_COMMITMENT); + assert_eq!( + encoded, + hex_literal::hex!( + "046d68343048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000 + 05020000000000000000000000000000000000000000000000000000000000000000000000000000000 + 000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + fffffffffff0000040000b10a558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed + 4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad812 + 79df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d1 + 0dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2 + ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01 + 558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e9 + 9a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72 + d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bc + b7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc3 + 21f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc9855 + 
80e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0 + c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da + 8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279d + f0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd + 3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac8 + 0a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558 + 455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a8 + 30e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d94 + 8d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb78 + 16f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f + 2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e + 4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33 + c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da848 + 0c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df07 + 95cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd + 68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a0 + 9abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455 + ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e + 314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1 + 107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f + 9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f231 + 9a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb + 75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86 + 
e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c7 + 46cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795c + c985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68c + e3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09ab + ed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad8 + 1279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314 + d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107 + b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba + 01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81
279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4d
a8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c3
3c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4f
b75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a
5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01
558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac8
0a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd
68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795c
c985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746c
c321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb
7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948
d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e3
14d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81
279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4d
a8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c3
3c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4f
b75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a
5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01
558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac8
0a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd
68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795c
c985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746c
c321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb
7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948
d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e3
14d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81
279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4d
a8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c3
3c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4f
b75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a
5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01
558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac8
0a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd
68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795c
c985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746c
c321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb
7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948
d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e3
14d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81
279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4d
a8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c3
3c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4f
b75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01" + ) + ); } } diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs index 453eb67315d4e..87f1b8756af65 100644 --- a/primitives/beefy/src/lib.rs +++ b/primitives/beefy/src/lib.rs @@ -33,38 +33,21 @@ mod commitment; pub mod mmr; -mod payload; pub mod witness; -pub use commitment::{Commitment, SignedCommitment, VersionedFinalityProof}; -pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider}; +pub use commitment::{ + known_payload_ids, BeefyPayloadId, Commitment, Payload, SignedCommitment, + VersionedFinalityProof, +}; use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; -use sp_application_crypto::RuntimeAppPublic; use sp_core::H256; -use sp_runtime::traits::Hash; use sp_std::prelude::*; /// Key type for BEEFY module. pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"beef"); -/// Trait representing BEEFY authority id. -pub trait BeefyAuthorityId: RuntimeAppPublic {} - -/// Means of verification for a BEEFY authority signature. -/// -/// Accepts custom hashing fn for the message and custom convertor fn for the signer. -pub trait BeefyVerify { - /// Type of the signer. - type Signer: BeefyAuthorityId; - - /// Verify a signature. - /// - /// Return `true` if signature is valid for the value. - fn verify(&self, msg: &[u8], signer: &Self::Signer) -> bool; -} - /// BEEFY cryptographic types /// /// This module basically introduces three crypto types: @@ -78,9 +61,7 @@ pub trait BeefyVerify { /// The current underlying crypto scheme used is ECDSA. This can be changed, /// without affecting code restricted against the above listed crypto types. 
pub mod crypto { - use super::{BeefyAuthorityId, BeefyVerify, Hash}; use sp_application_crypto::{app_crypto, ecdsa}; - use sp_core::crypto::Wraps; app_crypto!(ecdsa, crate::KEY_TYPE); /// Identity of a BEEFY authority using ECDSA as its crypto. @@ -88,26 +69,6 @@ pub mod crypto { /// Signature for a BEEFY authority using ECDSA as its crypto. pub type AuthoritySignature = Signature; - - impl BeefyAuthorityId for AuthorityId {} - - impl BeefyVerify for AuthoritySignature - where - ::Output: Into<[u8; 32]>, - { - type Signer = AuthorityId; - - fn verify(&self, msg: &[u8], signer: &Self::Signer) -> bool { - let msg_hash = ::hash(msg).into(); - match sp_io::crypto::secp256k1_ecdsa_recover_compressed( - self.as_inner_ref().as_ref(), - &msg_hash, - ) { - Ok(raw_pubkey) => raw_pubkey.as_ref() == AsRef::<[u8]>::as_ref(signer), - _ => false, - } - } - } } /// The `ConsensusEngineId` of BEEFY. @@ -220,8 +181,7 @@ sp_api::decl_runtime_apis! { mod tests { use super::*; use sp_application_crypto::ecdsa::{self, Public}; - use sp_core::{blake2_256, crypto::Wraps, keccak_256, Pair}; - use sp_runtime::traits::{BlakeTwo256, Keccak256}; + use sp_core::Pair; #[test] fn validator_set() { @@ -235,36 +195,4 @@ mod tests { assert_eq!(validators.id(), set_id); assert_eq!(validators.validators(), &vec![alice.public()]); } - - #[test] - fn beefy_verify_works() { - let msg = &b"test-message"[..]; - let (pair, _) = crypto::Pair::generate(); - - let keccak_256_signature: crypto::Signature = - pair.as_inner_ref().sign_prehashed(&keccak_256(msg)).into(); - - let blake2_256_signature: crypto::Signature = - pair.as_inner_ref().sign_prehashed(&blake2_256(msg)).into(); - - // Verification works if same hashing function is used when signing and verifying. - assert!(BeefyVerify::::verify(&keccak_256_signature, msg, &pair.public())); - assert!(BeefyVerify::::verify(&blake2_256_signature, msg, &pair.public())); - // Verification fails if distinct hashing functions are used when signing and verifying. 
- assert!(!BeefyVerify::::verify(&blake2_256_signature, msg, &pair.public())); - assert!(!BeefyVerify::::verify(&keccak_256_signature, msg, &pair.public())); - - // Other public key doesn't work - let (other_pair, _) = crypto::Pair::generate(); - assert!(!BeefyVerify::::verify( - &keccak_256_signature, - msg, - &other_pair.public() - )); - assert!(!BeefyVerify::::verify( - &blake2_256_signature, - msg, - &other_pair.public() - )); - } } diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 549d2edbdf287..761eee9f8ef85 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -17,8 +17,8 @@ //! BEEFY + MMR utilties. //! -//! While BEEFY can be used completely independently as an additional consensus gadget, -//! it is designed around a main use case of bridging standalone networks together. +//! While BEEFY can be used completely indepentently as an additional consensus gadget, +//! it is designed around a main use case of making bridging standalone networks together. //! For that use case it's common to use some aggregated data structure (like MMR) to be //! used in conjunction with BEEFY, to be able to efficiently prove any past blockchain data. //! @@ -26,13 +26,9 @@ //! but we imagine they will be useful for other chains that either want to bridge with Polkadot //! or are completely standalone, but heavily inspired by Polkadot. -use crate::{crypto::AuthorityId, ConsensusLog, MmrRootHash, Vec, BEEFY_ENGINE_ID}; +use crate::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_runtime::{ - generic::OpaqueDigestItemId, - traits::{Block, Header}, -}; /// A provider for extra data that gets added to the Mmr leaf pub trait BeefyDataProvider { @@ -48,7 +44,7 @@ impl BeefyDataProvider> for () { } /// A standard leaf that gets added every block to the MMR constructed by Substrate's `pallet_mmr`. 
-#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct MmrLeaf { /// Version of the leaf format. /// @@ -64,7 +60,7 @@ pub struct MmrLeaf { pub leaf_extra: ExtraData, } -/// An MMR leaf versioning scheme. +/// A MMR leaf versioning scheme. /// /// Version is a single byte that constist of two components: /// - `major` - 3 bits @@ -77,7 +73,7 @@ pub struct MmrLeaf { /// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be /// still decoded, the new fields will simply be ignored). We expect the major version to be bumped /// very rarely (hopefuly never). -#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode)] pub struct MmrLeafVersion(u8); impl MmrLeafVersion { /// Create new version object from `major` and `minor` components. @@ -101,7 +97,6 @@ impl MmrLeafVersion { /// Details of a BEEFY authority set. #[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct BeefyAuthoritySet { /// Id of the set. /// @@ -126,78 +121,9 @@ pub struct BeefyAuthoritySet { /// Details of the next BEEFY authority set. pub type BeefyNextAuthoritySet = BeefyAuthoritySet; -/// Extract the MMR root hash from a digest in the given header, if it exists. 
-pub fn find_mmr_root_digest(header: &B::Header) -> Option { - let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); - - let filter = |log: ConsensusLog| match log { - ConsensusLog::MmrRoot(root) => Some(root), - _ => None, - }; - header.digest().convert_first(|l| l.try_to(id).and_then(filter)) -} - -#[cfg(feature = "std")] -pub use mmr_root_provider::MmrRootProvider; -#[cfg(feature = "std")] -mod mmr_root_provider { - use super::*; - use crate::{known_payloads, payload::PayloadProvider, Payload}; - use sp_api::{NumberFor, ProvideRuntimeApi}; - use sp_mmr_primitives::MmrApi; - use sp_runtime::generic::BlockId; - use sp_std::{marker::PhantomData, sync::Arc}; - - /// A [`crate::Payload`] provider where payload is Merkle Mountain Range root hash. - /// - /// Encoded payload contains a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). - pub struct MmrRootProvider { - runtime: Arc, - _phantom: PhantomData, - } - - impl MmrRootProvider - where - B: Block, - R: ProvideRuntimeApi, - R::Api: MmrApi>, - { - /// Create new BEEFY Payload provider with MMR Root as payload. - pub fn new(runtime: Arc) -> Self { - Self { runtime, _phantom: PhantomData } - } - - /// Simple wrapper that gets MMR root from header digests or from client state. 
- fn mmr_root_from_digest_or_runtime(&self, header: &B::Header) -> Option { - find_mmr_root_digest::(header).or_else(|| { - self.runtime - .runtime_api() - .mmr_root(&BlockId::hash(header.hash())) - .ok() - .and_then(|r| r.ok()) - }) - } - } - - impl PayloadProvider for MmrRootProvider - where - B: Block, - R: ProvideRuntimeApi, - R::Api: MmrApi>, - { - fn payload(&self, header: &B::Header) -> Option { - self.mmr_root_from_digest_or_runtime(header).map(|mmr_root| { - Payload::from_single_entry(known_payloads::MMR_ROOT_ID, mmr_root.encode()) - }) - } - } -} - #[cfg(test)] mod tests { use super::*; - use crate::H256; - use sp_runtime::{traits::BlakeTwo256, Digest, DigestItem, OpaqueExtrinsic}; #[test] fn should_construct_version_correctly() { @@ -221,30 +147,4 @@ mod tests { fn should_panic_if_minor_too_large() { MmrLeafVersion::new(0, 32); } - - #[test] - fn extract_mmr_root_digest() { - type Header = sp_runtime::generic::Header; - type Block = sp_runtime::generic::Block; - let mut header = Header::new( - 1u64, - Default::default(), - Default::default(), - Default::default(), - Digest::default(), - ); - - // verify empty digest shows nothing - assert!(find_mmr_root_digest::(&header).is_none()); - - let mmr_root_hash = H256::random(); - header.digest_mut().push(DigestItem::Consensus( - BEEFY_ENGINE_ID, - ConsensusLog::::MmrRoot(mmr_root_hash).encode(), - )); - - // verify validator set is correctly extracted from digest - let extracted = find_mmr_root_digest::(&header); - assert_eq!(extracted, Some(mmr_root_hash)); - } } diff --git a/primitives/beefy/src/payload.rs b/primitives/beefy/src/payload.rs deleted file mode 100644 index 0f23c3f381f19..0000000000000 --- a/primitives/beefy/src/payload.rs +++ /dev/null @@ -1,105 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; -use sp_runtime::traits::Block; -use sp_std::prelude::*; - -/// Id of different payloads in the [`crate::Commitment`] data. -pub type BeefyPayloadId = [u8; 2]; - -/// Registry of all known [`BeefyPayloadId`]. -pub mod known_payloads { - use crate::BeefyPayloadId; - - /// A [`Payload`](super::Payload) identifier for Merkle Mountain Range root hash. - /// - /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). - pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; -} - -/// A BEEFY payload type allowing for future extensibility of adding additional kinds of payloads. -/// -/// The idea is to store a vector of SCALE-encoded values with an extra identifier. -/// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected -/// value. Duplicated identifiers are disallowed. It's okay for different implementations to only -/// support a subset of possible values. -#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] -pub struct Payload(Vec<(BeefyPayloadId, Vec)>); - -impl Payload { - /// Construct a new payload given an initial vallue - pub fn from_single_entry(id: BeefyPayloadId, value: Vec) -> Self { - Self(vec![(id, value)]) - } - - /// Returns a raw payload under given `id`. 
- /// - /// If the [`BeefyPayloadId`] is not found in the payload `None` is returned. - pub fn get_raw(&self, id: &BeefyPayloadId) -> Option<&Vec> { - let index = self.0.binary_search_by(|probe| probe.0.cmp(id)).ok()?; - Some(&self.0[index].1) - } - - /// Returns a decoded payload value under given `id`. - /// - /// In case the value is not there or it cannot be decoded does not match `None` is returned. - pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { - self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) - } - - /// Push a `Vec` with a given id into the payload vec. - /// This method will internally sort the payload vec after every push. - /// - /// Returns self to allow for daisy chaining. - pub fn push_raw(mut self, id: BeefyPayloadId, value: Vec) -> Self { - self.0.push((id, value)); - self.0.sort_by_key(|(id, _)| *id); - self - } -} - -/// Trait for custom BEEFY payload providers. -pub trait PayloadProvider { - /// Provide BEEFY payload if available for `header`. - fn payload(&self, header: &B::Header) -> Option; -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn payload_methods_work_as_expected() { - let id1: BeefyPayloadId = *b"hw"; - let msg1: String = "1. Hello World!".to_string(); - let id2: BeefyPayloadId = *b"yb"; - let msg2: String = "2. Yellow Board!".to_string(); - let id3: BeefyPayloadId = *b"cs"; - let msg3: String = "3. 
Cello Cord!".to_string(); - - let payload = Payload::from_single_entry(id1, msg1.encode()) - .push_raw(id2, msg2.encode()) - .push_raw(id3, msg3.encode()); - - assert_eq!(payload.get_decoded(&id1), Some(msg1)); - assert_eq!(payload.get_decoded(&id2), Some(msg2)); - assert_eq!(payload.get_raw(&id3), Some(&msg3.encode())); - assert_eq!(payload.get_raw(&known_payloads::MMR_ROOT_ID), None); - } -} diff --git a/primitives/beefy/src/witness.rs b/primitives/beefy/src/witness.rs index 2c45e0ade90c4..aae0608150534 100644 --- a/primitives/beefy/src/witness.rs +++ b/primitives/beefy/src/witness.rs @@ -81,7 +81,7 @@ mod tests { use super::*; use codec::Decode; - use crate::{crypto, known_payloads, Payload, KEY_TYPE}; + use crate::{crypto, known_payload_ids, Payload, KEY_TYPE}; type TestCommitment = Commitment; type TestSignedCommitment = SignedCommitment; @@ -111,10 +111,8 @@ mod tests { } fn signed_commitment() -> TestSignedCommitment { - let payload = Payload::from_single_entry( - known_payloads::MMR_ROOT_ID, - "Hello World!".as_bytes().to_vec(), - ); + let payload = + Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".as_bytes().to_vec()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -150,14 +148,12 @@ mod tests { assert_eq!(decoded, Ok(witness)); assert_eq!( encoded, - array_bytes::hex2bytes_unchecked( - "\ - 046d683048656c6c6f20576f726c642105000000000000000000000000000000000000000000000010\ - 0000010110000001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c\ - 746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a8\ - 6cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487b\ - ca2324b6a0046395a71681be3d0c2a00\ - " + hex_literal::hex!( + "046d683048656c6c6f20576f726c642105000000000000000000000000000000000000000000000010 + 0000010110000001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c + 
746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a86 + cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bc + a2324b6a0046395a71681be3d0c2a00" ) ); } diff --git a/primitives/beefy/test-res/large-raw-commitment b/primitives/beefy/test-res/large-raw-commitment deleted file mode 100644 index d5dbbe402a88e734367395e5af0305f5a47a0872..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44638 zcmeI)F-t;G7zW^@=#W%8gtJb8lR;zccL^uSp_~*Pt_adlGzHavP;nQep($tzT#6_V zC+DVyK-!$@58T!5x#JxiMDIQ4JMa5sx}C#n)Q;m`>9p66Tjgv>zOw1$`$>26f19{M z$b|5itKZfiZuZBktFyPe#%}HDyuXa*h3m>|Zfp8;GKsb?;#v9kVAvc?s$YBaoyAAz z^Y~?UbpJhizbvkwQ$E0V;CJA^;CbMg=)K_GXnkOfWSwZOY`: Send + Sync { /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is /// not found. fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { - self.block_hash_from_id(id).and_then(|h| { - h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) + self.block_hash_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) }) } } @@ -89,9 +89,9 @@ pub trait Backend: HeaderBackend + HeaderMetadata { /// Get block body. Returns `None` if block is not found. - fn body(&self, hash: Block::Hash) -> Result::Extrinsic>>>; + fn body(&self, id: BlockId) -> Result::Extrinsic>>>; /// Get block justifications. Returns `None` if no justification exists. - fn justifications(&self, hash: Block::Hash) -> Result>; + fn justifications(&self, id: BlockId) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; @@ -231,14 +231,14 @@ pub trait Backend: /// Get single indexed transaction by content hash. Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. 
- fn indexed_transaction(&self, hash: Block::Hash) -> Result>>; + fn indexed_transaction(&self, hash: &Block::Hash) -> Result>>; /// Check if indexed transaction exists. - fn has_indexed_transaction(&self, hash: Block::Hash) -> Result { + fn has_indexed_transaction(&self, hash: &Block::Hash) -> Result { Ok(self.indexed_transaction(hash)?.is_some()) } - fn block_indexed_body(&self, hash: Block::Hash) -> Result>>>; + fn block_indexed_body(&self, id: BlockId) -> Result>>>; } /// Blockchain info diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 1db37b47e4d44..46477a75b8a1f 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -21,7 +21,6 @@ use lru::LruCache; use parking_lot::RwLock; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One}; -use std::num::NonZeroUsize; /// Set to the expected max difference between `best` and `finalized` blocks at sync. const LRU_CACHE_SIZE: usize = 5_000; @@ -177,13 +176,6 @@ pub struct TreeRoute { } impl TreeRoute { - /// Creates a new `TreeRoute`. - /// - /// It is required that `pivot >= route.len()`, otherwise it may panics. - pub fn new(route: Vec>, pivot: usize) -> Self { - TreeRoute { route, pivot } - } - /// Get a slice of all retracted blocks in reverse order (towards common ancestor). pub fn retracted(&self) -> &[HashAndNumber] { &self.route[..self.pivot] @@ -240,15 +232,14 @@ pub struct HeaderMetadataCache { impl HeaderMetadataCache { /// Creates a new LRU header metadata cache with `capacity`. 
- pub fn new(capacity: NonZeroUsize) -> Self { + pub fn new(capacity: usize) -> Self { HeaderMetadataCache { cache: RwLock::new(LruCache::new(capacity)) } } } impl Default for HeaderMetadataCache { fn default() -> Self { - let cap = NonZeroUsize::new(LRU_CACHE_SIZE).expect("cache capacity is not zero"); - HeaderMetadataCache { cache: RwLock::new(LruCache::new(cap)) } + HeaderMetadataCache { cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)) } } } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 30f5c89650a78..dbb67a27c5144 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.50", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index 2c6a97b934137..3e47adf0bf92f 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -89,7 +89,7 @@ sp_api::decl_runtime_apis! { /// Currently, only the value provided by this type at genesis will be used. fn slot_duration() -> SlotDuration; - /// Return the current set of authorities. + // Return the current set of authorities. 
fn authorities() -> Vec; } } diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 049e511175867..736a78ab67b1a 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.50", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } merlin = { version = "2.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 621ab859b914f..492d1a9a7238f 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -137,7 +137,7 @@ pub enum ConsensusLog { /// Configuration data used by the BABE consensus engine. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] -pub struct BabeConfigurationV1 { +pub struct BabeGenesisConfigurationV1 { /// The slot duration in milliseconds for BABE. Currently, only /// the value provided by this type at genesis will be used. /// @@ -156,7 +156,7 @@ pub struct BabeConfigurationV1 { pub c: (u64, u64), /// The authorities for the genesis epoch. - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// The randomness for the genesis epoch. 
pub randomness: Randomness, @@ -166,13 +166,13 @@ pub struct BabeConfigurationV1 { pub secondary_slots: bool, } -impl From for BabeConfiguration { - fn from(v1: BabeConfigurationV1) -> Self { +impl From for BabeGenesisConfiguration { + fn from(v1: BabeGenesisConfigurationV1) -> Self { Self { slot_duration: v1.slot_duration, epoch_length: v1.epoch_length, c: v1.c, - authorities: v1.authorities, + genesis_authorities: v1.genesis_authorities, randomness: v1.randomness, allowed_slots: if v1.secondary_slots { AllowedSlots::PrimaryAndSecondaryPlainSlots @@ -185,7 +185,7 @@ impl From for BabeConfiguration { /// Configuration data used by the BABE consensus engine. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] -pub struct BabeConfiguration { +pub struct BabeGenesisConfiguration { /// The slot duration in milliseconds for BABE. Currently, only /// the value provided by this type at genesis will be used. /// @@ -203,23 +203,16 @@ pub struct BabeConfiguration { /// of a slot being empty. pub c: (u64, u64), - /// The authorities - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// The authorities for the genesis epoch. + pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// The randomness + /// The randomness for the genesis epoch. pub randomness: Randomness, /// Type of allowed slots. pub allowed_slots: AllowedSlots, } -impl BabeConfiguration { - /// Convenience method to get the slot duration as a `SlotDuration` value. - pub fn slot_duration(&self) -> SlotDuration { - SlotDuration::from_millis(self.slot_duration) - } -} - /// Types of allowed slots. #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -244,7 +237,7 @@ impl AllowedSlots { } } -/// Configuration data used by the BABE consensus engine that may change with epochs. +/// Configuration data used by the BABE consensus engine. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BabeEpochConfiguration { @@ -364,12 +357,12 @@ sp_api::decl_runtime_apis! { /// API necessary for block authorship with BABE. #[api_version(2)] pub trait BabeApi { - /// Return the configuration for BABE. - fn configuration() -> BabeConfiguration; + /// Return the genesis configuration for BABE. The configuration is only read on genesis. + fn configuration() -> BabeGenesisConfiguration; /// Return the configuration for BABE. Version 1. #[changed_in(2)] - fn configuration() -> BabeConfigurationV1; + fn configuration() -> BabeGenesisConfigurationV1; /// Returns the slot that started the current epoch. fn current_epoch_start() -> Slot; diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index d160cd118998c..973cb3e410e0d 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.42" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 458a5eee259a9..4539cec2c8e0a 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -25,6 +25,7 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; use sp_runtime::{ + generic::BlockId, traits::{Block as BlockT, HashFor}, Digest, }; @@ -235,10 +236,10 @@ pub trait Proposer { pub trait SyncOracle { /// Whether the synchronization service is undergoing major sync. /// Returns true if so. - fn is_major_syncing(&self) -> bool; + fn is_major_syncing(&mut self) -> bool; /// Whether the synchronization service is offline. /// Returns true if so. 
- fn is_offline(&self) -> bool; + fn is_offline(&mut self) -> bool; } /// A synchronization oracle for when there is no network. @@ -246,10 +247,10 @@ pub trait SyncOracle { pub struct NoNetwork; impl SyncOracle for NoNetwork { - fn is_major_syncing(&self) -> bool { + fn is_major_syncing(&mut self) -> bool { false } - fn is_offline(&self) -> bool { + fn is_offline(&mut self) -> bool { false } } @@ -257,13 +258,71 @@ impl SyncOracle for NoNetwork { impl SyncOracle for Arc where T: ?Sized, - T: SyncOracle, + for<'r> &'r T: SyncOracle, { - fn is_major_syncing(&self) -> bool { - T::is_major_syncing(self) + fn is_major_syncing(&mut self) -> bool { + <&T>::is_major_syncing(&mut &**self) } - fn is_offline(&self) -> bool { - T::is_offline(self) + fn is_offline(&mut self) -> bool { + <&T>::is_offline(&mut &**self) + } +} + +/// Checks if the current active native block authoring implementation can author with the runtime +/// at the given block. +pub trait CanAuthorWith { + /// See trait docs for more information. + /// + /// # Return + /// + /// - Returns `Ok(())` when authoring is supported. + /// - Returns `Err(_)` when authoring is not supported. + fn can_author_with(&self, at: &BlockId) -> Result<(), String>; +} + +/// Checks if the node can author blocks by using +/// [`NativeVersion::can_author_with`](sp_version::NativeVersion::can_author_with). +#[derive(Clone)] +pub struct CanAuthorWithNativeVersion(T); + +impl CanAuthorWithNativeVersion { + /// Creates a new instance of `Self`. + pub fn new(inner: T) -> Self { + Self(inner) + } +} + +impl + sp_version::GetNativeVersion, Block: BlockT> + CanAuthorWith for CanAuthorWithNativeVersion +{ + fn can_author_with(&self, at: &BlockId) -> Result<(), String> { + match self.0.runtime_version(at) { + Ok(version) => self.0.native_version().can_author_with(&version), + Err(e) => Err(format!( + "Failed to get runtime version at `{}` and will disable authoring. 
Error: {}", + at, e, + )), + } + } +} + +/// Returns always `true` for `can_author_with`. This is useful for tests. +#[derive(Clone)] +pub struct AlwaysCanAuthor; + +impl CanAuthorWith for AlwaysCanAuthor { + fn can_author_with(&self, _: &BlockId) -> Result<(), String> { + Ok(()) + } +} + +/// Never can author. +#[derive(Clone)] +pub struct NeverCanAuthor; + +impl CanAuthorWith for NeverCanAuthor { + fn can_author_with(&self, _: &BlockId) -> Result<(), String> { + Err("Authoring is always disabled.".to_string()) } } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 3e8bac51289b8..fced678293140 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -21,47 +21,49 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } -impl-serde = { version = "0.4.0", optional = true } -wasmi = { version = "0.13", optional = true } +primitive-types = { version = "0.11.1", default-features = false, features = ["codec", "scale-info"] } +impl-serde = { version = "0.3.0", optional = true } +wasmi = { version = "0.9.1", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.2.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.4", optional = true } tiny-bip39 = { version = "0.8.2", optional = true } -regex = { version = "1.6.0", optional = true } +regex = { version = "1.5.4", optional = true } num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", 
default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } -parking_lot = { version = "0.12.1", optional = true } +parking_lot = { version = "0.12.0", optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.30", optional = true } bitflags = "1.3" # full crypto -array-bytes = { version = "4.1", optional = true } -ed25519-zebra = { version = "3.1.0", default-features = false, optional = true } -blake2 = { version = "0.10.4", default-features = false, optional = true } +ed25519-dalek = { version = "1.0.1", default-features = false, features = ["u64_backend", "alloc"], optional = true } +blake2-rfc = { version = "0.2.18", default-features = false, optional = true } schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", "u64_backend", ], default-features = false, optional = true } +hex = { version = "0.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } -ss58-registry = { version = "1.34.0", default-features = false } +ss58-registry = { version = "1.18.0", default-features = false } sp-core-hashing = { version = "4.0.0", path = "./hashing", default-features = false, optional = true } sp-runtime-interface = 
{ version = "6.0.0", default-features = false, path = "../runtime-interface" } [dev-dependencies] sp-serializer = { version = "4.0.0-dev", path = "../serializer" } +hex-literal = "0.3.4" rand = "0.7.2" criterion = "0.3.3" serde_json = "1.0" @@ -77,7 +79,6 @@ bench = false [features] default = ["std"] std = [ - "merlin?/std", "full_crypto", "log/std", "thiserror", @@ -95,9 +96,9 @@ std = [ "hash-db/std", "sp-std/std", "serde", - "blake2/std", - "array-bytes", - "ed25519-zebra/std", + "blake2-rfc/std", + "ed25519-dalek/std", + "hex/std", "base58", "substrate-bip39", "tiny-bip39", @@ -126,10 +127,10 @@ std = [ # or Intel SGX. # For the regular wasm runtime builds this should not be used. full_crypto = [ - "array-bytes", - "ed25519-zebra", - "blake2", + "ed25519-dalek", + "blake2-rfc", "schnorrkel", + "hex", "libsecp256k1", "secp256k1", "sp-core-hashing", diff --git a/primitives/core/hashing/Cargo.toml b/primitives/core/hashing/Cargo.toml index efe38af1602dd..d85e28d1b2e56 100644 --- a/primitives/core/hashing/Cargo.toml +++ b/primitives/core/hashing/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sp-core-hashing" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -blake2 = { version = "0.10.4", default-features = false } +blake2 = { version = "0.10.2", default-features = false } byteorder = { version = "1.3.2", default-features = false } digest = { version = "0.10.3", default-features = false } sha2 = { version = "0.10.2", default-features = false } @@ -24,7 +24,6 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../std" } [features] default = ["std"] std = [ - "digest/std", "blake2/std", "byteorder/std", "sha2/std", diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 06703acea7202..80b44449dbac1 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -132,7 +132,9 @@ impl DeriveJunction { let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); index.using_encoded(|data| { if 
data.len() > JUNCTION_ID_LEN { - cc.copy_from_slice(&sp_core_hashing::blake2_256(data)); + let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); + let hash = hash_result.as_bytes(); + cc.copy_from_slice(hash); } else { cc[0..data.len()].copy_from_slice(data); } @@ -290,7 +292,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { } let hash = ss58hash(&data[0..body_len + prefix_len]); - let checksum = &hash[0..CHECKSUM_LEN]; + let checksum = &hash.as_bytes()[0..CHECKSUM_LEN]; if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. return Err(PublicError::InvalidChecksum) @@ -331,7 +333,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { }; v.extend(self.as_ref()); let r = ss58hash(&v); - v.extend(&r[0..2]); + v.extend(&r.as_bytes()[0..2]); v.to_base58() } @@ -364,13 +366,11 @@ pub trait Derive: Sized { const PREFIX: &[u8] = b"SS58PRE"; #[cfg(feature = "std")] -fn ss58hash(data: &[u8]) -> Vec { - use blake2::{Blake2b512, Digest}; - - let mut ctx = Blake2b512::new(); - ctx.update(PREFIX); - ctx.update(data); - ctx.finalize().to_vec() +fn ss58hash(data: &[u8]) -> blake2_rfc::blake2b::Blake2bResult { + let mut context = blake2_rfc::blake2b::Blake2b::new(64); + context.update(PREFIX); + context.update(data); + context.finalize() } /// Default prefix number @@ -421,7 +421,7 @@ impl + AsRef<[u8]> + Public + Derive> Ss58Codec for T { let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { - let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?; + let d = hex::decode(stripped).map_err(|_| PublicError::InvalidFormat)?; Self::from_slice(&d).map_err(|()| PublicError::BadLength)? } else { Self::from_ss58check(s)? 
@@ -614,7 +614,10 @@ impl sp_std::str::FromStr for AccountId32 { fn from_str(s: &str) -> Result { let hex_or_ss58_without_prefix = s.trim_start_matches("0x"); if hex_or_ss58_without_prefix.len() == 64 { - array_bytes::hex_n_into(hex_or_ss58_without_prefix).map_err(|_| "invalid hex address.") + let mut bytes = [0u8; 32]; + hex::decode_to_slice(hex_or_ss58_without_prefix, &mut bytes) + .map_err(|_| "invalid hex address.") + .map(|_| Self::from(bytes)) } else { Self::from_ss58check(s).map_err(|_| "invalid ss58 address.") } @@ -940,7 +943,7 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { password_override.or_else(|| password.as_ref().map(|p| p.expose_secret().as_str())); let (root, seed) = if let Some(stripped) = phrase.expose_secret().strip_prefix("0x") { - array_bytes::hex2bytes(stripped) + hex::decode(stripped) .ok() .and_then(|seed_vec| { let mut seed = Self::Seed::default(); @@ -984,11 +987,6 @@ pub trait IsWrappedBy: From + Into { pub trait Wraps: Sized { /// The inner type it is wrapping. type Inner: IsWrappedBy; - - /// Get a reference to the inner type that is wrapped. 
- fn as_inner_ref(&self) -> &Self::Inner { - Self::Inner::from_ref(self) - } } impl IsWrappedBy for T @@ -1129,6 +1127,7 @@ pub mod key_types { mod tests { use super::*; use crate::DeriveJunction; + use hex_literal::hex; #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { @@ -1270,7 +1269,7 @@ mod tests { fn interpret_std_seed_should_work() { assert_eq!( TestPair::from_string("0x0123456789abcdef", None), - Ok(TestPair::Seed(array_bytes::hex2bytes_unchecked("0123456789abcdef"))) + Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) ); } @@ -1312,14 +1311,6 @@ mod tests { path: vec![DeriveJunction::soft("DOT")] }) ); - assert_eq!( - TestPair::from_string("hello world/0123456789012345678901234567890123456789", None), - Ok(TestPair::Standard { - phrase: "hello world".to_owned(), - password: None, - path: vec![DeriveJunction::soft("0123456789012345678901234567890123456789")] - }) - ); assert_eq!( TestPair::from_string("hello world//1", None), Ok(TestPair::Standard { @@ -1336,14 +1327,6 @@ mod tests { path: vec![DeriveJunction::hard("DOT")] }) ); - assert_eq!( - TestPair::from_string("hello world//0123456789012345678901234567890123456789", None), - Ok(TestPair::Standard { - phrase: "hello world".to_owned(), - password: None, - path: vec![DeriveJunction::hard("0123456789012345678901234567890123456789")] - }) - ); assert_eq!( TestPair::from_string("hello world//1/DOT", None), Ok(TestPair::Standard { diff --git a/primitives/core/src/defer.rs b/primitives/core/src/defer.rs deleted file mode 100644 index d14b26d59e4dd..0000000000000 --- a/primitives/core/src/defer.rs +++ /dev/null @@ -1,140 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Contains the [`crate::defer!`] macro for *deferring* the execution -//! of code until the current scope is dropped. -//! This helps with *always* executing cleanup code. - -/// Executes the wrapped closure on drop. -/// -/// Should be used together with the [`crate::defer!`] macro. -#[must_use] -pub struct DeferGuard(pub Option); - -impl Drop for DeferGuard { - fn drop(&mut self) { - self.0.take().map(|f| f()); - } -} - -/// Executes the given code when the current scope is dropped. -/// -/// Multiple calls to [`crate::defer!`] will execute the passed codes in reverse order. -/// This also applies to panic stack unwinding. -/// -/// # Example -/// -/// ```rust -/// use sp_core::defer; -/// -/// let message = std::cell::RefCell::new("".to_string()); -/// { -/// defer!( -/// message.borrow_mut().push_str("world!"); -/// ); -/// defer!( -/// message.borrow_mut().push_str("Hello "); -/// ); -/// } -/// assert_eq!(*message.borrow(), "Hello world!"); -/// ``` -#[macro_export] -macro_rules! defer( - ( $( $code:tt )* ) => { - let _guard = $crate::defer::DeferGuard(Some(|| { $( $code )* })); - }; -); - -#[cfg(test)] -mod test { - #[test] - fn defer_guard_works() { - let mut called = false; - { - defer!( - called = true; - ); - } - assert!(called, "DeferGuard should have executed the closure"); - } - - #[test] - /// `defer` executes the code in reverse order of being called. 
- fn defer_guard_order_works() { - let called = std::cell::RefCell::new(1); - - defer!( - assert_eq!(*called.borrow(), 3); - ); - defer!( - assert_eq!(*called.borrow(), 2); - *called.borrow_mut() = 3; - ); - defer!({ - assert_eq!(*called.borrow(), 1); - *called.borrow_mut() = 2; - }); - } - - #[test] - #[allow(unused_braces)] - #[allow(clippy::unnecessary_operation)] - fn defer_guard_syntax_works() { - let called = std::cell::RefCell::new(0); - { - defer!(*called.borrow_mut() += 1); - defer!(*called.borrow_mut() += 1;); // With ; - defer!({ *called.borrow_mut() += 1 }); - defer!({ *called.borrow_mut() += 1 };); // With ; - } - assert_eq!(*called.borrow(), 4); - } - - #[test] - /// `defer` executes the code even in case of a panic. - fn defer_guard_panic_unwind_works() { - use std::panic::{catch_unwind, AssertUnwindSafe}; - let mut called = false; - - let should_panic = catch_unwind(AssertUnwindSafe(|| { - defer!(called = true); - panic!(); - })); - - assert!(should_panic.is_err(), "DeferGuard should have panicked"); - assert!(called, "DeferGuard should have executed the closure"); - } - - #[test] - /// `defer` executes the code even in case another `defer` panics. 
- fn defer_guard_defer_panics_unwind_works() { - use std::panic::{catch_unwind, AssertUnwindSafe}; - let counter = std::cell::RefCell::new(0); - - let should_panic = catch_unwind(AssertUnwindSafe(|| { - defer!(*counter.borrow_mut() += 1); - defer!( - *counter.borrow_mut() += 1; - panic!(); - ); - defer!(*counter.borrow_mut() += 1); - })); - - assert!(should_panic.is_err(), "DeferGuard should have panicked"); - assert_eq!(*counter.borrow(), 3, "DeferGuard should have executed the closure"); - } -} diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index ca6b800625bc2..d56f65fd289e7 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -229,7 +229,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) + serializer.serialize_str(&hex::encode(self)) } } @@ -239,7 +239,7 @@ impl<'de> Deserialize<'de> for Signature { where D: Deserializer<'de>, { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + let signature_hex = hex::decode(&String::deserialize(deserializer)?) 
.map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) .map_err(|e| de::Error::custom(format!("{:?}", e))) @@ -597,6 +597,7 @@ mod test { set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, DEV_PHRASE, }; + use hex_literal::hex; use serde_json; #[test] @@ -611,35 +612,31 @@ mod test { #[test] fn seed_and_derive_should_work() { - let seed = array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - ); + let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); let pair = Pair::from_seed(&seed); assert_eq!(pair.seed(), seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap(); assert_eq!( derived.0.seed(), - array_bytes::hex2array_unchecked::<32>( - "b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61" - ) + hex!("b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61") ); } #[test] fn test_vector_should_work() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let public = pair.public(); assert_eq!( public, Public::from_full( - &array_bytes::hex2bytes_unchecked("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4"), + &hex!("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4")[..], ).unwrap(), ); let message = b""; - let signature = array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); + let signature = 
hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -656,11 +653,11 @@ mod test { assert_eq!( public, Public::from_full( - &array_bytes::hex2bytes_unchecked("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4"), + &hex!("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4")[..], ).unwrap(), ); let message = b""; - let signature = array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); + let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -683,10 +680,10 @@ mod test { assert_eq!( public, Public::from_full( - &array_bytes::hex2bytes_unchecked("5676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba995840f3de562156558efbfdac3f16af0065e5f66795f4dd8262a228ef8c6d813"), + &hex!("5676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba995840f3de562156558efbfdac3f16af0065e5f66795f4dd8262a228ef8c6d813")[..], ).unwrap(), ); - let message = array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let message = 
hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); assert!(Pair::verify(&signature, &message[..], &public)); diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index e85eb87c9fd83..177af0651c0ef 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -39,9 +39,7 @@ use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "std")] use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use core::convert::TryFrom; -#[cfg(feature = "full_crypto")] -use ed25519_zebra::{SigningKey, VerificationKey}; +use ed25519_dalek::{Signer as _, Verifier as _}; #[cfg(feature = "std")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; @@ -77,10 +75,17 @@ pub struct Public(pub [u8; 32]); /// A key pair. #[cfg(feature = "full_crypto")] -#[derive(Copy, Clone)] -pub struct Pair { - public: VerificationKey, - secret: SigningKey, +pub struct Pair(ed25519_dalek::Keypair); + +#[cfg(feature = "full_crypto")] +impl Clone for Pair { + fn clone(&self) -> Self { + Pair(ed25519_dalek::Keypair { + public: self.0.public, + secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) + .expect("key is always the correct size; qed"), + }) + } } impl AsRef<[u8; 32]> for Public { @@ -228,7 +233,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) + serializer.serialize_str(&hex::encode(self)) } } @@ -238,7 +243,7 @@ impl<'de> Deserialize<'de> for Signature { where D: Deserializer<'de>, { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + let signature_hex = hex::decode(&String::deserialize(deserializer)?) 
.map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) .map_err(|e| de::Error::custom(format!("{:?}", e))) @@ -451,10 +456,10 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = - SigningKey::try_from(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; - let public = VerificationKey::from(&secret); - Ok(Pair { secret, public }) + let secret = ed25519_dalek::SecretKey::from_bytes(seed_slice) + .map_err(|_| SecretStringError::InvalidSeedLength)?; + let public = ed25519_dalek::PublicKey::from(&secret); + Ok(Pair(ed25519_dalek::Keypair { secret, public })) } /// Derive a child key from a series of given junctions. @@ -463,7 +468,7 @@ impl TraitPair for Pair { path: Iter, _seed: Option, ) -> Result<(Pair, Option), DeriveError> { - let mut acc = self.secret.into(); + let mut acc = self.0.secret.to_bytes(); for j in path { match j { DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), @@ -475,12 +480,16 @@ impl TraitPair for Pair { /// Get the public key. fn public(&self) -> Public { - Public(self.public.into()) + let mut r = [0u8; 32]; + let pk = self.0.public.as_bytes(); + r.copy_from_slice(pk); + Public(r) } /// Sign a message. fn sign(&self, message: &[u8]) -> Signature { - Signature::from_raw(self.secret.sign(message).into()) + let r = self.0.sign(message).to_bytes(); + Signature::from_raw(r) } /// Verify a signature on a message. Returns true if the signature is good. @@ -493,17 +502,17 @@ impl TraitPair for Pair { /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct /// size. Use it only if you're coming from byte buffers and need the speed. 
fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = match VerificationKey::try_from(pubkey.as_ref()) { + let public_key = match ed25519_dalek::PublicKey::from_bytes(pubkey.as_ref()) { Ok(pk) => pk, Err(_) => return false, }; - let sig = match ed25519_zebra::Signature::try_from(sig) { + let sig = match ed25519_dalek::Signature::try_from(sig) { Ok(s) => s, Err(_) => return false, }; - public_key.verify(&sig, message.as_ref()).is_ok() + public_key.verify(message.as_ref(), &sig).is_ok() } /// Return a vec filled with raw data. @@ -515,8 +524,8 @@ impl TraitPair for Pair { #[cfg(feature = "full_crypto")] impl Pair { /// Get the seed for this key. - pub fn seed(&self) -> Seed { - self.secret.into() + pub fn seed(&self) -> &Seed { + self.0.secret.as_bytes() } /// Exactly as `from_string` except that if no matches are found then, the the first 32 @@ -551,6 +560,7 @@ impl CryptoType for Pair { mod test { use super::*; use crate::crypto::DEV_PHRASE; + use hex_literal::hex; use serde_json; #[test] @@ -565,35 +575,31 @@ mod test { #[test] fn seed_and_derive_should_work() { - let seed = array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - ); + let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); let pair = Pair::from_seed(&seed); - assert_eq!(pair.seed(), seed); + assert_eq!(pair.seed(), &seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.seed(), - array_bytes::hex2array_unchecked::<32>( - "ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c" - ) + &hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") ); } #[test] fn test_vector_should_work() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + 
"9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let public = pair.public(); assert_eq!( public, - Public::from_raw(array_bytes::hex2array_unchecked( + Public::from_raw(hex!( "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" )) ); let message = b""; - let signature = array_bytes::hex2array_unchecked("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); + let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -609,12 +615,12 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::from_raw(array_bytes::hex2array_unchecked( + Public::from_raw(hex!( "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" )) ); let message = b""; - let signature = array_bytes::hex2array_unchecked("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); + let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -636,11 +642,11 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::from_raw(array_bytes::hex2array_unchecked( + Public::from_raw(hex!( "2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" )) ); - let message = array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let message = 
hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); assert!(Pair::verify(&signature, &message[..], &public)); diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index fda7604d5337f..ab3334f9e4f5a 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -40,6 +40,8 @@ pub use serde; use serde::{Deserialize, Serialize}; use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; use sp_std::{ops::Deref, prelude::*}; +#[cfg(feature = "std")] +use std::borrow::Cow; pub use sp_debug_derive::RuntimeDebug; @@ -51,11 +53,9 @@ pub mod hashing; #[cfg(feature = "full_crypto")] pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; -pub mod bounded; pub mod crypto; pub mod hexdisplay; -pub mod defer; pub mod ecdsa; pub mod ed25519; pub mod hash; @@ -207,6 +207,85 @@ impl OpaquePeerId { } } +/// Something that is either a native or an encoded value. +#[cfg(feature = "std")] +pub enum NativeOrEncoded { + /// The native representation. + Native(R), + /// The encoded representation. + Encoded(Vec), +} + +#[cfg(feature = "std")] +impl From for NativeOrEncoded { + fn from(val: R) -> Self { + Self::Native(val) + } +} + +#[cfg(feature = "std")] +impl sp_std::fmt::Debug for NativeOrEncoded { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + hexdisplay::HexDisplay::from(&self.as_encoded().as_ref()).fmt(f) + } +} + +#[cfg(feature = "std")] +impl NativeOrEncoded { + /// Return the value as the encoded format. + pub fn as_encoded(&self) -> Cow<'_, [u8]> { + match self { + NativeOrEncoded::Encoded(e) => Cow::Borrowed(e.as_slice()), + NativeOrEncoded::Native(n) => Cow::Owned(n.encode()), + } + } + + /// Return the value as the encoded format. 
+ pub fn into_encoded(self) -> Vec { + match self { + NativeOrEncoded::Encoded(e) => e, + NativeOrEncoded::Native(n) => n.encode(), + } + } +} + +#[cfg(feature = "std")] +impl PartialEq for NativeOrEncoded { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (NativeOrEncoded::Native(l), NativeOrEncoded::Native(r)) => l == r, + (NativeOrEncoded::Native(n), NativeOrEncoded::Encoded(e)) | + (NativeOrEncoded::Encoded(e), NativeOrEncoded::Native(n)) => + Some(n) == codec::Decode::decode(&mut &e[..]).ok().as_ref(), + (NativeOrEncoded::Encoded(l), NativeOrEncoded::Encoded(r)) => l == r, + } + } +} + +/// A value that is never in a native representation. +/// This is type is useful in conjunction with `NativeOrEncoded`. +#[cfg(feature = "std")] +#[derive(PartialEq)] +pub enum NeverNativeValue {} + +#[cfg(feature = "std")] +impl codec::Encode for NeverNativeValue { + fn encode(&self) -> Vec { + // The enum is not constructable, so this function should never be callable! + unreachable!() + } +} + +#[cfg(feature = "std")] +impl codec::EncodeLike for NeverNativeValue {} + +#[cfg(feature = "std")] +impl codec::Decode for NeverNativeValue { + fn decode(_: &mut I) -> Result { + Err("`NeverNativeValue` should never be decoded".into()) + } +} + /// Provide a simple 4 byte identifier for a type. pub trait TypeId { /// Simple 4 byte identifier. @@ -386,239 +465,3 @@ macro_rules! impl_maybe_marker { // The maximum possible allocation size was chosen rather arbitrary, 32 MiB should be enough for // everybody. pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB - -/// A trait for querying a single value from a type defined in the trait. -/// -/// It is not required that the value is constant. -pub trait TypedGet { - /// The type which is returned. - type Type; - /// Return the current value. - fn get() -> Self::Type; -} - -/// A trait for querying a single value from a type. -/// -/// It is not required that the value is constant. 
-pub trait Get { - /// Return the current value. - fn get() -> T; -} - -impl Get for () { - fn get() -> T { - T::default() - } -} - -/// Implement Get by returning Default for any type that implements Default. -pub struct GetDefault; -impl Get for GetDefault { - fn get() -> T { - T::default() - } -} - -macro_rules! impl_const_get { - ($name:ident, $t:ty) => { - #[doc = "Const getter for a basic type."] - #[derive($crate::RuntimeDebug)] - pub struct $name; - impl Get<$t> for $name { - fn get() -> $t { - T - } - } - impl Get> for $name { - fn get() -> Option<$t> { - Some(T) - } - } - impl TypedGet for $name { - type Type = $t; - fn get() -> $t { - T - } - } - }; -} - -impl_const_get!(ConstBool, bool); -impl_const_get!(ConstU8, u8); -impl_const_get!(ConstU16, u16); -impl_const_get!(ConstU32, u32); -impl_const_get!(ConstU64, u64); -impl_const_get!(ConstU128, u128); -impl_const_get!(ConstI8, i8); -impl_const_get!(ConstI16, i16); -impl_const_get!(ConstI32, i32); -impl_const_get!(ConstI64, i64); -impl_const_get!(ConstI128, i128); - -/// Try and collect into a collection `C`. -pub trait TryCollect { - /// The error type that gets returned when a collection can't be made from `self`. - type Error; - /// Consume self and try to collect the results into `C`. - /// - /// This is useful in preventing the undesirable `.collect().try_into()` call chain on - /// collections that need to be converted into a bounded type (e.g. `BoundedVec`). - fn try_collect(self) -> Result; -} - -/// Create new implementations of the [`Get`](crate::Get) trait. -/// -/// The so-called parameter type can be created in four different ways: -/// -/// - Using `const` to create a parameter type that provides a `const` getter. It is required that -/// the `value` is const. -/// -/// - Declare the parameter type without `const` to have more freedom when creating the value. 
-/// -/// NOTE: A more substantial version of this macro is available in `frame_support` crate which -/// allows mutable and persistant variants. -/// -/// # Examples -/// -/// ``` -/// # use sp_core::Get; -/// # use sp_core::parameter_types; -/// // This function cannot be used in a const context. -/// fn non_const_expression() -> u64 { 99 } -/// -/// const FIXED_VALUE: u64 = 10; -/// parameter_types! { -/// pub const Argument: u64 = 42 + FIXED_VALUE; -/// /// Visibility of the type is optional -/// OtherArgument: u64 = non_const_expression(); -/// } -/// -/// trait Config { -/// type Parameter: Get; -/// type OtherParameter: Get; -/// } -/// -/// struct Runtime; -/// impl Config for Runtime { -/// type Parameter = Argument; -/// type OtherParameter = OtherArgument; -/// } -/// ``` -/// -/// # Invalid example: -/// -/// ```compile_fail -/// # use sp_core::Get; -/// # use sp_core::parameter_types; -/// // This function cannot be used in a const context. -/// fn non_const_expression() -> u64 { 99 } -/// -/// parameter_types! { -/// pub const Argument: u64 = non_const_expression(); -/// } -/// ``` -#[macro_export] -macro_rules! parameter_types { - ( - $( #[ $attr:meta ] )* - $vis:vis const $name:ident: $type:ty = $value:expr; - $( $rest:tt )* - ) => ( - $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(@IMPL_CONST $name , $type , $value); - $crate::parameter_types!( $( $rest )* ); - ); - ( - $( #[ $attr:meta ] )* - $vis:vis $name:ident: $type:ty = $value:expr; - $( $rest:tt )* - ) => ( - $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(@IMPL $name, $type, $value); - $crate::parameter_types!( $( $rest )* ); - ); - () => (); - (@IMPL_CONST $name:ident, $type:ty, $value:expr) => { - impl $name { - /// Returns the value of this parameter type. 
- pub const fn get() -> $type { - $value - } - } - - impl> $crate::Get for $name { - fn get() -> I { - I::from(Self::get()) - } - } - - impl $crate::TypedGet for $name { - type Type = $type; - fn get() -> $type { - Self::get() - } - } - }; - (@IMPL $name:ident, $type:ty, $value:expr) => { - impl $name { - /// Returns the value of this parameter type. - pub fn get() -> $type { - $value - } - } - - impl> $crate::Get for $name { - fn get() -> I { - I::from(Self::get()) - } - } - - impl $crate::TypedGet for $name { - type Type = $type; - fn get() -> $type { - Self::get() - } - } - }; -} - -/// Build a bounded vec from the given literals. -/// -/// The type of the outcome must be known. -/// -/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding -/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. -#[macro_export] -#[cfg(feature = "std")] -macro_rules! bounded_vec { - ($ ($values:expr),* $(,)?) => { - { - $crate::sp_std::vec![$($values),*].try_into().unwrap() - } - }; - ( $value:expr ; $repetition:expr ) => { - { - $crate::sp_std::vec![$value ; $repetition].try_into().unwrap() - } - } -} - -/// Build a bounded btree-map from the given literals. -/// -/// The type of the outcome must be known. -/// -/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding -/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. -#[macro_export] -#[cfg(feature = "std")] -macro_rules! bounded_btree_map { - ($ ( $key:expr => $value:expr ),* $(,)?) 
=> { - { - $crate::TryCollect::<$crate::bounded::BoundedBTreeMap<_, _, _>>::try_collect( - $crate::sp_std::vec![$(($key, $value)),*].into_iter() - ).unwrap() - } - }; -} diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 9064fb7427393..ef033c2099b5f 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -233,7 +233,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) + serializer.serialize_str(&hex::encode(self)) } } @@ -243,7 +243,7 @@ impl<'de> Deserialize<'de> for Signature { where D: Deserializer<'de>, { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) .map_err(|e| de::Error::custom(format!("{:?}", e))) @@ -664,6 +664,7 @@ pub fn verify_batch( mod compatibility_test { use super::*; use crate::crypto::DEV_PHRASE; + use hex_literal::hex; // NOTE: tests to ensure addresses that are created with the `0.1.x` version (pre-audit) are // still functional. 
@@ -672,9 +673,7 @@ mod compatibility_test { fn derive_soft_known_pair_should_work() { let pair = Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).unwrap(); // known address of DEV_PHRASE with 1.1 - let known = array_bytes::hex2bytes_unchecked( - "d6c71059dbbe9ad2b0ed3f289738b800836eb425544ce694825285b958ca755e", - ); + let known = hex!("d6c71059dbbe9ad2b0ed3f289738b800836eb425544ce694825285b958ca755e"); assert_eq!(pair.public().to_raw_vec(), known); } @@ -682,19 +681,17 @@ mod compatibility_test { fn derive_hard_known_pair_should_work() { let pair = Pair::from_string(&format!("{}//Alice", DEV_PHRASE), None).unwrap(); // known address of DEV_PHRASE with 1.1 - let known = array_bytes::hex2bytes_unchecked( - "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d", - ); + let known = hex!("d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"); assert_eq!(pair.public().to_raw_vec(), known); } #[test] fn verify_known_old_message_should_work() { - let public = Public::from_raw(array_bytes::hex2array_unchecked( - "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918", + let public = Public::from_raw(hex!( + "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918" )); // signature generated by the 1.1 version with the same ^^ public key. - let signature = Signature::from_raw(array_bytes::hex2array_unchecked( + let signature = Signature::from_raw(hex!( "5a9755f069939f45d96aaf125cf5ce7ba1db998686f87f2fb3cbdea922078741a73891ba265f70c31436e18a9acd14d189d73c12317ab6c313285cd938453202" )); let message = b"Verifying that I am the owner of 5G9hQLdsKQswNPgB499DeA5PkFBbgkLPJWkkS6FAM6xGQ8xD. 
Hash: 221455a3\n"; @@ -707,6 +704,7 @@ mod compatibility_test { mod test { use super::*; use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; + use hex_literal::hex; use serde_json; #[test] @@ -747,8 +745,8 @@ mod test { #[test] fn derive_soft_should_work() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let derive_1 = pair.derive(Some(DeriveJunction::soft(1)).into_iter(), None).unwrap().0; let derive_1b = pair.derive(Some(DeriveJunction::soft(1)).into_iter(), None).unwrap().0; @@ -759,8 +757,8 @@ mod test { #[test] fn derive_hard_should_work() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let derive_1 = pair.derive(Some(DeriveJunction::hard(1)).into_iter(), None).unwrap().0; let derive_1b = pair.derive(Some(DeriveJunction::hard(1)).into_iter(), None).unwrap().0; @@ -771,8 +769,8 @@ mod test { #[test] fn derive_soft_public_should_work() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let path = Some(DeriveJunction::soft(1)); let pair_1 = pair.derive(path.into_iter(), None).unwrap().0; @@ -782,8 +780,8 @@ mod test { #[test] fn derive_hard_public_should_fail() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let path = Some(DeriveJunction::hard(1)); 
assert!(pair.public().derive(path.into_iter()).is_none()); @@ -791,13 +789,13 @@ mod test { #[test] fn sr_test_vector_should_work() { - let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let public = pair.public(); assert_eq!( public, - Public::from_raw(array_bytes::hex2array_unchecked( + Public::from_raw(hex!( "44a996beb1eef7bdcab976ab6d2ca26104834164ecf28fb375600576fcc6eb0f" )) ); @@ -842,11 +840,11 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::from_raw(array_bytes::hex2array_unchecked( + Public::from_raw(hex!( "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" )) ); - let message = array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -867,11 +865,11 @@ mod test { // schnorrkel-js. // // This is to make sure that the wasm library is compatible. 
- let pk = Pair::from_seed(&array_bytes::hex2array_unchecked( - "0000000000000000000000000000000000000000000000000000000000000000", + let pk = Pair::from_seed(&hex!( + "0000000000000000000000000000000000000000000000000000000000000000" )); let public = pk.public(); - let js_signature = Signature::from_raw(array_bytes::hex2array_unchecked( + let js_signature = Signature::from_raw(hex!( "28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00" )); assert!(Pair::verify_deprecated(&js_signature, b"SUBSTRATE", &public)); diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index d3fa3fc86fce5..d5ca1dc45fa0c 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -90,7 +90,7 @@ macro_rules! wasm_export_functions { &mut &input[..], ).expect("Input data is correctly encoded"); - (|| { $( $fn_impl )* })() + $( $fn_impl )* } $crate::to_substrate_wasm_fn_return_value(&()) @@ -118,7 +118,7 @@ macro_rules! wasm_export_functions { &mut &input[..], ).expect("Input data is correctly encoded"); - (|| { $( $fn_impl )* })() + $( $fn_impl )* }; $crate::to_substrate_wasm_fn_return_value(&output) diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index c4b7f20f7e9a0..80e8963a2909d 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -20,6 +20,7 @@ use std::{ borrow::Cow, fmt::{Debug, Display}, + panic::UnwindSafe, }; pub use sp_externalities::{Externalities, ExternalitiesExt}; @@ -31,14 +32,18 @@ pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'stat /// Call a given method in the runtime. Returns a tuple of the result (either the output data /// or an execution error) together with a `bool`, which is true if native execution was used. 
- fn call( + fn call< + R: codec::Codec + PartialEq, + NC: FnOnce() -> Result> + UnwindSafe, + >( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, method: &str, data: &[u8], use_native: bool, - ) -> (Result, Self::Error>, bool); + native_call: Option, + ) -> (Result, Self::Error>, bool); } /// Something that can fetch the runtime `:code`. @@ -179,6 +184,12 @@ pub trait RuntimeSpawn: Send { fn join(&self, handle: u64) -> Vec; } +#[cfg(feature = "std")] +sp_externalities::decl_extension! { + /// Extension that supports spawning extra runtime instances in externalities. + pub struct RuntimeSpawnExt(Box); +} + /// Something that can spawn tasks (blocking and non-blocking) with an assigned name /// and optional group. #[dyn_clonable::clonable] diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index 4bf914bde2ee1..f4eb3a19ac36c 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -35,6 +35,7 @@ mod tests { ($name::from(2), "0x2"), ($name::from(10), "0xa"), ($name::from(15), "0xf"), + ($name::from(15), "0xf"), ($name::from(16), "0x10"), ($name::from(1_000), "0x3e8"), ($name::from(100_000), "0x186a0"), @@ -51,6 +52,8 @@ mod tests { assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } }; } diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index f19a647fed032..5aa3d9a239aa3 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -11,5 +11,5 @@ documentation = "https://docs.rs/sp-database" readme = "README.md" [dependencies] -kvdb = "0.12.0" -parking_lot = "0.12.1" +kvdb = "0.11.0" +parking_lot = "0.12.0" diff --git a/primitives/finality-grandpa/src/lib.rs 
b/primitives/finality-grandpa/src/lib.rs index f1584dc673228..4be42c3d19b6c 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -29,10 +29,7 @@ use codec::{Codec, Decode, Encode, Input}; use scale_info::TypeInfo; #[cfg(feature = "std")] use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; -use sp_runtime::{ - traits::{Header as HeaderT, NumberFor}, - ConsensusEngineId, RuntimeDebug, -}; +use sp_runtime::{traits::NumberFor, ConsensusEngineId, RuntimeDebug}; use sp_std::{borrow::Cow, vec::Vec}; #[cfg(feature = "std")] @@ -79,64 +76,6 @@ pub type RoundNumber = u64; /// A list of Grandpa authorities with associated weights. pub type AuthorityList = Vec<(AuthorityId, AuthorityWeight)>; -/// A GRANDPA message for a substrate chain. -pub type Message
= grandpa::Message<
::Hash,
::Number>; - -/// A signed message. -pub type SignedMessage
= grandpa::SignedMessage< -
::Hash, -
::Number, - AuthoritySignature, - AuthorityId, ->; - -/// A primary propose message for this chain's block type. -pub type PrimaryPropose
= - grandpa::PrimaryPropose<
::Hash,
::Number>; -/// A prevote message for this chain's block type. -pub type Prevote
= grandpa::Prevote<
::Hash,
::Number>; -/// A precommit message for this chain's block type. -pub type Precommit
= - grandpa::Precommit<
::Hash,
::Number>; -/// A catch up message for this chain's block type. -pub type CatchUp
= grandpa::CatchUp< -
::Hash, -
::Number, - AuthoritySignature, - AuthorityId, ->; -/// A commit message for this chain's block type. -pub type Commit
= grandpa::Commit< -
::Hash, -
::Number, - AuthoritySignature, - AuthorityId, ->; - -/// A compact commit message for this chain's block type. -pub type CompactCommit
= grandpa::CompactCommit< -
::Hash, -
::Number, - AuthoritySignature, - AuthorityId, ->; - -/// A GRANDPA justification for block finality, it includes a commit message and -/// an ancestry proof including all headers routing all precommit target blocks -/// to the commit target block. Due to the current voting strategy the precommit -/// targets should be the same as the commit target, since honest voters don't -/// vote past authority set change blocks. -/// -/// This is meant to be stored in the db and passed around the network to other -/// nodes, and are used by syncing nodes to prove authority set handoffs. -#[derive(Clone, Encode, Decode, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Debug))] -pub struct GrandpaJustification { - pub round: u64, - pub commit: Commit
, - pub votes_ancestries: Vec
, -} - /// A scheduled change of authority set. #[cfg_attr(feature = "std", derive(Serialize))] #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index b176147c053a6..c7e10be32fe28 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -14,12 +14,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.50", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.30", optional = true } sp-core = { version = "6.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "6.0.0", optional = true, default-features = false, path = "../runtime" } +sp-runtime = { version = "6.0.0", optional = true, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] @@ -31,7 +31,7 @@ std = [ "async-trait", "codec/std", "sp-core/std", - "sp-runtime/std", + "sp-runtime", "sp-std/std", "thiserror", ] diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 26dec17e032dd..fda6b7fdd11dd 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -22,15 +22,15 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-keystore = { version = "0.12.0", default-features = false, optional = true, path = "../keystore" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { version = "0.12.0", default-features = false, optional = true, path = "../state-machine" } +sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machine" } sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", 
default-features = false } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "6.0.0", default-features = false, optional = true, path = "../trie" } +sp-trie = { version = "6.0.0", optional = true, path = "../trie" } sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.17", optional = true } futures = { version = "0.3.21", features = ["thread-pool"], optional = true } -parking_lot = { version = "0.12.1", optional = true } +parking_lot = { version = "0.12.0", optional = true } secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.28", default-features = false} @@ -38,15 +38,14 @@ tracing-core = { version = "0.1.28", default-features = false} [features] default = ["std"] std = [ - "bytes/std", "sp-externalities/std", "sp-core/std", "sp-keystore", "codec/std", "sp-std/std", "hash-db/std", - "sp-trie/std", - "sp-state-machine/std", + "sp-trie", + "sp-state-machine", "libsecp256k1", "secp256k1", "sp-runtime-interface/std", diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 33516bb0397f3..9bf9345e594c3 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -40,7 +40,7 @@ use sp_core::{ hexdisplay::HexDisplay, offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, storage::ChildInfo, - traits::TaskExecutorExt, + traits::{RuntimeSpawnExt, TaskExecutorExt}, }; #[cfg(feature = "std")] use sp_keystore::{KeystoreExt, SyncCryptoStore}; @@ -1657,6 +1657,38 @@ pub trait Sandbox { } } +/// Wasm host functions for managing tasks. +/// +/// This should not be used directly. Use `sp_tasks` for running parallel tasks instead. 
+#[runtime_interface(wasm_only)] +pub trait RuntimeTasks { + /// Wasm host function for spawning task. + /// + /// This should not be used directly. Use `sp_tasks::spawn` instead. + fn spawn(dispatcher_ref: u32, entry: u32, payload: Vec) -> u64 { + sp_externalities::with_externalities(|mut ext| { + let runtime_spawn = ext + .extension::() + .expect("Cannot spawn without dynamic runtime dispatcher (RuntimeSpawnExt)"); + runtime_spawn.spawn_call(dispatcher_ref, entry, payload) + }) + .expect("`RuntimeTasks::spawn`: called outside of externalities context") + } + + /// Wasm host function for joining a task. + /// + /// This should not be used directly. Use `join` of `sp_tasks::spawn` result instead. + fn join(handle: u64) -> Vec { + sp_externalities::with_externalities(|mut ext| { + let runtime_spawn = ext + .extension::() + .expect("Cannot join without dynamic runtime dispatcher (RuntimeSpawnExt)"); + runtime_spawn.join(handle) + }) + .expect("`RuntimeTasks::join`: called outside of externalities context") + } +} + /// Allocator used by Substrate when executing the Wasm runtime. 
#[cfg(all(target_arch = "wasm32", not(feature = "std")))] struct WasmAllocator; @@ -1735,6 +1767,7 @@ pub type SubstrateHostFunctions = ( sandbox::HostFunctions, crate::trie::HostFunctions, offchain_index::HostFunctions, + runtime_tasks::HostFunctions, transaction_index::HostFunctions, ); @@ -1862,7 +1895,6 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { let pair = sr25519::Pair::generate_with_phrase(None).0; - let pair_unused = sr25519::Pair::generate_with_phrase(None).0; crypto::start_batch_verify(); for it in 0..70 { let msg = format!("Schnorrkel {}!", it); @@ -1870,10 +1902,8 @@ mod tests { crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public()); } - // push invalid - let msg = b"asdf!"; - let signature = pair.sign(msg); - crypto::sr25519_batch_verify(&signature, msg, &pair_unused.public()); + // push invlaid + crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); assert!(!crypto::finish_batch_verify()); crypto::start_batch_verify(); @@ -1908,10 +1938,10 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { - // valid ed25519 signature + // invalid ed25519 signature crypto::start_batch_verify(); crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); - assert!(crypto::finish_batch_verify()); + assert!(!crypto::finish_batch_verify()); // 2 valid ed25519 signatures crypto::start_batch_verify(); @@ -1931,14 +1961,12 @@ mod tests { // 1 valid, 1 invalid ed25519 signature crypto::start_batch_verify(); - let pair1 = ed25519::Pair::generate_with_phrase(None).0; - let pair2 = ed25519::Pair::generate_with_phrase(None).0; + let pair = ed25519::Pair::generate_with_phrase(None).0; let msg = b"Important message"; - let signature = pair1.sign(msg); + let signature = pair.sign(msg); + crypto::ed25519_batch_verify(&signature, msg, &pair.public()); crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), 
&zero_ed_pub()); - crypto::ed25519_batch_verify(&signature, msg, &pair1.public()); - crypto::ed25519_batch_verify(&signature, msg, &pair2.public()); assert!(!crypto::finish_batch_verify()); @@ -1965,13 +1993,11 @@ mod tests { // 1 valid sr25519, 1 invalid sr25519 crypto::start_batch_verify(); - let pair1 = sr25519::Pair::generate_with_phrase(None).0; - let pair2 = sr25519::Pair::generate_with_phrase(None).0; + let pair = sr25519::Pair::generate_with_phrase(None).0; let msg = b"Schnorrkcel!"; - let signature = pair1.sign(msg); + let signature = pair.sign(msg); + crypto::sr25519_batch_verify(&signature, msg, &pair.public()); - crypto::sr25519_batch_verify(&signature, msg, &pair1.public()); - crypto::sr25519_batch_verify(&signature, msg, &pair2.public()); crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); assert!(!crypto::finish_batch_verify()); diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index cbb8a22ba4dd6..3c3b7933c50da 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -13,11 +13,11 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = "0.3.21" merlin = { version = "2.0", default-features = false } -parking_lot = { version = "0.12.1", default-features = false } +parking_lot = { version = "0.12.0", default-features = false } schnorrkel = { version = "0.9.1", default-features = false, features = ["preaudit_deprecated", "u64_backend"] } serde = { version = "1.0", optional = true } thiserror = "1.0" diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index e857974ba898c..2be3f592b2b20 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -13,7 +13,6 @@ targets = 
["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } @@ -21,10 +20,9 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } -thiserror = "1.0" [dev-dependencies] -array-bytes = "4.1" +hex-literal = "0.3" [features] default = ["std"] diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index d46cb73c3c5e8..8a2e901aefddf 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -20,12 +20,11 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use scale_info::TypeInfo; use sp_debug_derive::RuntimeDebug; use sp_runtime::traits; -use sp_std::fmt; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; +use sp_std::{fmt, vec}; /// A type to describe node position in the MMR (node index). pub type NodeIndex = u64; @@ -69,6 +68,17 @@ impl OnNewRoot for () { fn on_new_root(_root: &Hash) {} } +/// A MMR proof data for one of the leaves. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct Proof { + /// The index of the leaf the proof is for. + pub leaf_index: LeafIndex, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: NodeIndex, + /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). 
+ pub items: Vec, +} + /// A full leaf content stored in the offchain-db. pub trait FullLeaf: Clone + PartialEq + fmt::Debug { /// Encode the leaf either in its full or compact form. @@ -341,9 +351,9 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); -/// An MMR proof data for a group of leaves. -#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct Proof { +/// A MMR proof data for a group of leaves. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct BatchProof { /// The indices of the leaves the proof is for. pub leaf_indices: Vec, /// Number of leaves in MMR, when the proof was generated. @@ -352,40 +362,46 @@ pub struct Proof { pub items: Vec, } +impl BatchProof { + /// Converts batch proof to single leaf proof + pub fn into_single_leaf_proof(proof: BatchProof) -> Result, Error> { + Ok(Proof { + leaf_index: *proof.leaf_indices.get(0).ok_or(Error::InvalidLeafIndex)?, + leaf_count: proof.leaf_count, + items: proof.items, + }) + } +} + +impl Proof { + /// Converts a single leaf proof into a batch proof + pub fn into_batch_proof(proof: Proof) -> BatchProof { + BatchProof { + leaf_indices: vec![proof.leaf_index], + leaf_count: proof.leaf_count, + items: proof.items, + } + } +} /// Merkle Mountain Range operation error. -#[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] pub enum Error { - /// Error during translation of a block number into a leaf index. - #[cfg_attr(feature = "std", error("Error translation block number into leaf index"))] - BlockNumToLeafIndex, /// Error while pushing new node. - #[cfg_attr(feature = "std", error("Error pushing new node"))] Push, /// Error getting the new root. - #[cfg_attr(feature = "std", error("Error getting new root"))] GetRoot, - /// Error committing changes. 
- #[cfg_attr(feature = "std", error("Error committing changes"))] + /// Error commiting changes. Commit, /// Error during proof generation. - #[cfg_attr(feature = "std", error("Error generating proof"))] GenerateProof, /// Proof verification error. - #[cfg_attr(feature = "std", error("Invalid proof"))] Verify, /// Leaf not found in the storage. - #[cfg_attr(feature = "std", error("Leaf was not found"))] LeafNotFound, /// Mmr Pallet not included in runtime - #[cfg_attr(feature = "std", error("MMR pallet not included in runtime"))] PalletNotIncluded, /// Cannot find the requested leaf index - #[cfg_attr(feature = "std", error("Requested leaf index invalid"))] InvalidLeafIndex, - /// The provided best know block number is invalid. - #[cfg_attr(feature = "std", error("Provided best known block number invalid"))] - InvalidBestKnownBlock, } impl Error { @@ -415,32 +431,48 @@ impl Error { sp_api::decl_runtime_apis! { /// API to interact with MMR pallet. - pub trait MmrApi { + pub trait MmrApi { + /// Generate MMR proof for a leaf under given index. + fn generate_proof(leaf_index: LeafIndex) -> Result<(EncodableOpaqueLeaf, Proof), Error>; + + /// Verify MMR proof against on-chain MMR. + /// + /// Note this function will use on-chain MMR root hash and check if the proof + /// matches the hash. + /// See [Self::verify_proof_stateless] for a stateless verifier. + fn verify_proof(leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; + + /// Verify MMR proof against given root hash. + /// + /// Note this function does not require any on-chain storage - the + /// proof is verified against given MMR root hash. + /// + /// The leaf data is expected to be encoded in its compact form. + fn verify_proof_stateless(root: Hash, leaf: EncodableOpaqueLeaf, proof: Proof) + -> Result<(), Error>; + /// Return the on-chain MMR root hash. fn mmr_root() -> Result; - /// Generate MMR proof for a series of block numbers. 
If `best_known_block_number = Some(n)`, - /// use historical MMR state at given block height `n`. Else, use current MMR state. - fn generate_proof( - block_numbers: Vec, - best_known_block_number: Option - ) -> Result<(Vec, Proof), Error>; + /// Generate MMR proof for a series of leaves under given indices. + fn generate_batch_proof(leaf_indices: Vec) -> Result<(Vec, BatchProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. /// - /// Note this function will use on-chain MMR root hash and check if the proof matches the hash. + /// Note this function will use on-chain MMR root hash and check if the proof + /// matches the hash. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] - fn verify_proof(leaves: Vec, proof: Proof) -> Result<(), Error>; + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [BatchProof] + fn verify_batch_proof(leaves: Vec, proof: BatchProof) -> Result<(), Error>; - /// Verify MMR proof against given root hash for a batch of leaves. + /// Verify MMR proof against given root hash or a batch of leaves. /// /// Note this function does not require any on-chain storage - the /// proof is verified against given MMR root hash. 
/// /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] - fn verify_proof_stateless(root: Hash, leaves: Vec, proof: Proof) + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [BatchProof] + fn verify_batch_proof_stateless(root: Hash, leaves: Vec, proof: BatchProof) -> Result<(), Error>; } } @@ -465,7 +497,7 @@ mod tests { fn should_encode_decode_proof() { // given let proof: TestProof = Proof { - leaf_indices: vec![5], + leaf_index: 5, leaf_count: 10, items: vec![ hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), @@ -503,16 +535,11 @@ mod tests { cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>() ); // check encoding correctness - assert_eq!( - &encoded[0], - &array_bytes::hex2bytes_unchecked("00343048656c6c6f20576f726c6421") - ); + assert_eq!(&encoded[0], &hex_literal::hex!("00343048656c6c6f20576f726c6421")); assert_eq!( encoded[1].as_slice(), - array_bytes::hex2bytes_unchecked( - "01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd" - ) - .as_slice() + hex_literal::hex!("01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd") + .as_ref() ); } diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 293a17624820b..a200d5c41ee35 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } @@ -36,4 +36,4 @@ path = "src/phragmms_balancing.rs" 
[[bin]] name = "phragmen_pjr" -path = "src/phragmen_pjr.rs" +path = "src/phragmen_pjr.rs" \ No newline at end of file diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index ad9bd43f9bce0..e5853f28c4929 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -80,7 +80,7 @@ pub fn generate_random_npos_inputs( } candidates.push(id); } - candidates.sort(); + candidates.sort_unstable(); candidates.dedup(); assert_eq!(candidates.len(), candidate_count); @@ -99,11 +99,11 @@ pub fn generate_random_npos_inputs( let mut chosen_candidates = Vec::with_capacity(n_candidates_chosen); chosen_candidates.extend(candidates.choose_multiple(&mut rng, n_candidates_chosen)); - chosen_candidates.sort(); + chosen_candidates.sort_unstable(); voters.push((id, vote_weight, chosen_candidates)); } - voters.sort(); + voters.sort_unstable(); voters.dedup_by_key(|(id, _weight, _chosen_candidates)| *id); assert_eq!(voters.len(), voter_count); diff --git a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs index 0401249a3df1d..2396fdfa3b40e 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs @@ -68,18 +68,18 @@ fn main() { #[cfg(not(fuzzing))] #[derive(Debug, Parser)] -#[command(author, version, about)] +#[clap(author, version, about)] struct Opt { /// How many candidates participate in this election - #[arg(short, long)] + #[clap(short, long)] candidates: Option, /// How many voters participate in this election - #[arg(short, long)] + #[clap(short, long)] voters: Option, /// Random seed to use in this election - #[arg(long)] + #[clap(long)] seed: Option, } diff --git a/primitives/npos-elections/fuzzer/src/reduce.rs b/primitives/npos-elections/fuzzer/src/reduce.rs index 602467a343884..605f2d6081a6f 100644 --- 
a/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/primitives/npos-elections/fuzzer/src/reduce.rs @@ -90,7 +90,7 @@ fn generate_random_phragmen_assignment( let target = targets_to_chose_from.remove(rng.gen_range(0..targets_to_chose_from.len())); if winners.iter().all(|w| *w != target) { - winners.push(target); + winners.push(target.clone()); } (target, rng.gen_range(1 * KSM..100 * KSM)) }) diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index d0c9ed18caddc..dd2a9bf198f8d 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -74,16 +74,17 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; -use sp_core::{bounded::BoundedVec, RuntimeDebug}; +use sp_core::RuntimeDebug; use sp_std::{ cell::RefCell, cmp::Ordering, collections::btree_map::BTreeMap, prelude::*, rc::Rc, vec, }; +use codec::{Decode, Encode, MaxEncodedLen}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + #[cfg(test)] mod mock; #[cfg(test)] @@ -450,11 +451,6 @@ impl Default for Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; -/// Same as `Supports` but bounded by `B`. -/// -/// To note, the inner `Support` is still unbounded. -pub type BoundedSupports = BoundedVec<(A, Support), B>; - /// Linkage from a winner to their [`Support`]. /// /// This is more helpful than a normal [`Supports`] as it allows faster error checking. 
diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 5a06e3f3c88ca..dd85ce9b6dfae 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -92,7 +92,7 @@ where .into_iter() .enumerate() .map(|(idx, who)| { - c_idx_cache.insert(who, idx); + c_idx_cache.insert(who.clone(), idx); _Candidate { who, ..Default::default() } }) .collect::>>(); @@ -103,7 +103,7 @@ where for v in votes { if let Some(idx) = c_idx_cache.get(&v) { candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; - edges.push(_Edge { who: v, candidate_index: *idx, ..Default::default() }); + edges.push(_Edge { who: v.clone(), candidate_index: *idx, ..Default::default() }); } } _Voter { who, edges, budget: voter_stake, load: 0f64 } @@ -143,21 +143,21 @@ where } } - elected_candidates.push((winner.who, winner.approval_stake as ExtendedBalance)); + elected_candidates.push((winner.who.clone(), winner.approval_stake as ExtendedBalance)); } else { break } } for n in &mut voters { - let mut assignment = (n.who, vec![]); + let mut assignment = (n.who.clone(), vec![]); for e in &mut n.edges { if let Some(c) = elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) { if c != n.who { let ratio = e.load / n.load; - assignment.1.push((e.who, ratio)); + assignment.1.push((e.who.clone(), ratio)); } } } @@ -321,7 +321,7 @@ pub(crate) fn run_and_compare( candidates.clone(), voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -368,7 +368,7 @@ pub(crate) fn build_support_map_float( let mut supports = <_SupportMap>::new(); result.winners.iter().map(|(e, _)| (e, stake_of(e) as f64)).for_each(|(e, s)| { let item = _Support { own: s, total: s, ..Default::default() }; - supports.insert(*e, item); + supports.insert(e.clone(), item); }); for (n, assignment) in result.assignments.iter_mut() { @@ -377,7 
+377,7 @@ pub(crate) fn build_support_map_float( let other_stake = nominator_stake * *r; if let Some(support) = supports.get_mut(c) { support.total = support.total + other_stake; - support.others.push((*n, other_stake)); + support.others.push((n.clone(), other_stake)); } *r = other_stake; } diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs index fd7c8ef539241..914834fbb2aef 100644 --- a/primitives/npos-elections/src/pjr.rs +++ b/primitives/npos-elections/src/pjr.rs @@ -481,7 +481,7 @@ mod tests { assert_eq!( candidates .iter() - .map(|c| (c.borrow().who, c.borrow().elected, c.borrow().backed_stake)) + .map(|c| (c.borrow().who.clone(), c.borrow().elected, c.borrow().backed_stake)) .collect::>(), vec![(10, false, 0), (20, true, 15), (30, false, 0), (40, true, 15)], ); diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 6f2e4fca77115..5b88889201b31 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -233,7 +233,7 @@ fn phragmen_poc_works() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -289,7 +289,7 @@ fn phragmen_poc_works_with_balancing() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), Some(config), ) @@ -376,7 +376,7 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { candidates.clone(), auto_generate_self_voters(&candidates) .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -407,7 +407,7 @@ fn phragmen_accuracy_on_large_scale_voters_and_candidates() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), 
vs.clone())) .collect::>(), None, ) @@ -439,7 +439,7 @@ fn phragmen_accuracy_on_small_scale_self_vote() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -469,7 +469,7 @@ fn phragmen_accuracy_on_small_scale_no_self_vote() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -505,7 +505,7 @@ fn phragmen_large_scale_test() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -532,7 +532,7 @@ fn phragmen_large_scale_test_2() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -601,7 +601,7 @@ fn elect_has_no_entry_barrier() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -622,7 +622,7 @@ fn phragmen_self_votes_should_be_kept() { candidates, voters .iter() - .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, ) @@ -872,15 +872,30 @@ mod score { let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; - assert_eq!(is_score_better(claim, initial, Perbill::from_rational(1u32, 10_000),), true,); + assert_eq!( + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(1u32, 10_000),), + true, + ); - assert_eq!(is_score_better(claim, initial, Perbill::from_rational(2u32, 10_000),), true,); + assert_eq!( + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(2u32, 10_000),), + true, + ); - 
assert_eq!(is_score_better(claim, initial, Perbill::from_rational(3u32, 10_000),), true,); + assert_eq!( + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(3u32, 10_000),), + true, + ); - assert_eq!(is_score_better(claim, initial, Perbill::from_rational(4u32, 10_000),), true,); + assert_eq!( + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(4u32, 10_000),), + true, + ); - assert_eq!(is_score_better(claim, initial, Perbill::from_rational(5u32, 10_000),), false,); + assert_eq!( + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(5u32, 10_000),), + false, + ); } #[test] diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index 19f76dddbab22..bd429fd0e8af7 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -16,4 +16,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = "0.3.64" lazy_static = "1.4.0" -regex = "1.6.0" +regex = "1.5.5" diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index f4a4fe12f6c47..335eb6d6c9a0e 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -18,4 +18,4 @@ serde = { version = "1.0.136", features = ["derive"] } sp-core = { version = "6.0.0", path = "../core" } [dev-dependencies] -serde_json = "1.0.85" +serde_json = "1.0.79" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index e7f0cee3f140f..a657c98381c9a 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" } sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" -primitive-types = { version = "0.12.0", default-features = false } 
+primitive-types = { version = "0.11.1", default-features = false } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } impl-trait-for-tuples = "0.2.2" @@ -37,8 +37,6 @@ trybuild = "1.0.60" [features] default = [ "std" ] std = [ - "sp-storage/std", - "bytes/std", "sp-wasm-interface/std", "sp-std/std", "sp-tracing/std", diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index 6ebcb7482a779..f9bf8825f9486 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -308,10 +308,10 @@ pub use sp_std; /// /// 1. The generated functions are not callable from the native side. /// 2. The trait as shown above is not implemented for [`Externalities`] and is instead -/// implemented for `FunctionContext` (from `sp-wasm-interface`). +/// implemented for `FunctionExecutor` (from `sp-wasm-interface`). /// /// # Disable tracing -/// By adding `no_tracing` to the list of options you can prevent the wasm-side interface from +/// By addding `no_tracing` to the list of options you can prevent the wasm-side interface from /// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant /// for the case when that would create a circular dependency. 
You usually _do not_ want to add /// this flag, as tracing doesn't cost you anything by default anyways (it is added as a no-op) diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 8d7b5b2b93354..1493aa2324f56 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -19,7 +19,7 @@ either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"], optional = true } +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } paste = "1.0" rand = { version = "0.7.2", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } @@ -29,11 +29,10 @@ sp-arithmetic = { version = "5.0.0", default-features = false, path = "../arithm sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-io = { version = "6.0.0", default-features = false, path = "../io" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } -sp-weights = { version = "4.0.0", default-features = false, path = "../weights" } [dev-dependencies] rand = "0.7.2" -serde_json = "1.0.85" +serde_json = "1.0.79" zstd = { version = "0.11.2", default-features = false } sp-api = { version = "4.0.0-dev", path = "../api" } sp-state-machine = { version = "0.12.0", path = "../state-machine" } @@ -41,6 +40,7 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] +bench = [] runtime-benchmarks = [] default = ["std"] std = [ @@ -57,5 +57,4 @@ std = [ "sp-core/std", "sp-io/std", "sp-std/std", - "sp-weights/std", ] diff --git a/primitives/core/src/bounded.rs b/primitives/runtime/src/bounded.rs 
similarity index 100% rename from primitives/core/src/bounded.rs rename to primitives/runtime/src/bounded.rs diff --git a/primitives/core/src/bounded/bounded_btree_map.rs b/primitives/runtime/src/bounded/bounded_btree_map.rs similarity index 81% rename from primitives/core/src/bounded/bounded_btree_map.rs rename to primitives/runtime/src/bounded/bounded_btree_map.rs index d2c148d6de9c5..aefd168632a1e 100644 --- a/primitives/core/src/bounded/bounded_btree_map.rs +++ b/primitives/runtime/src/bounded/bounded_btree_map.rs @@ -17,7 +17,7 @@ //! Traits, types and structs to support a bounded BTreeMap. -use crate::{Get, TryCollect}; +use crate::traits::{Get, TryCollect}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{borrow::Borrow, collections::btree_map::BTreeMap, marker::PhantomData, ops::Deref}; @@ -161,44 +161,6 @@ where { self.0.remove_entry(key) } - - /// Gets a mutable iterator over the entries of the map, sorted by key. - /// - /// See [`BTreeMap::iter_mut`] for more information. - pub fn iter_mut(&mut self) -> sp_std::collections::btree_map::IterMut { - self.0.iter_mut() - } - - /// Consume the map, applying `f` to each of it's values and returning a new map. - pub fn map(self, mut f: F) -> BoundedBTreeMap - where - F: FnMut((&K, V)) -> T, - { - BoundedBTreeMap::::unchecked_from( - self.0 - .into_iter() - .map(|(k, v)| { - let t = f((&k, v)); - (k, t) - }) - .collect(), - ) - } - - /// Consume the map, applying `f` to each of it's values as long as it returns successfully. If - /// an `Err(E)` is ever encountered, the mapping is short circuited and the error is returned; - /// otherwise, a new map is returned in the contained `Ok` value. 
- pub fn try_map(self, mut f: F) -> Result, E> - where - F: FnMut((&K, V)) -> Result, - { - Ok(BoundedBTreeMap::::unchecked_from( - self.0 - .into_iter() - .map(|(k, v)| (f((&k, v)).map(|t| (k, t)))) - .collect::, _>>()?, - )) - } } impl Default for BoundedBTreeMap @@ -394,7 +356,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::ConstU32; + use crate::traits::ConstU32; fn map_from_keys(keys: &[K]) -> BTreeMap where @@ -546,7 +508,7 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap(); assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); - // but these won't work + // but these worn't work let b2: Result>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect(); assert!(b2.is_err()); @@ -555,71 +517,4 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect(); assert!(b2.is_err()); } - - #[test] - fn test_iter_mut() { - let mut b1: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); - - let b2: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); - - b1.iter_mut().for_each(|(_, v)| *v *= 2); - - assert_eq!(b1, b2); - } - - #[test] - fn map_retains_size() { - let b1 = boundedmap_from_keys::>(&[1, 2]); - let b2 = b1.clone(); - - assert_eq!(b1.len(), b2.map(|(_, _)| 5_u32).len()); - } - - #[test] - fn map_maps_properly() { - let b1: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); - let b2: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); - - assert_eq!(b1, b2.map(|(_, v)| v * 2)); - } - - #[test] - fn try_map_retains_size() { - let b1 = boundedmap_from_keys::>(&[1, 2]); - let b2 = b1.clone(); - - assert_eq!(b1.len(), b2.try_map::<_, (), _>(|(_, _)| Ok(5_u32)).unwrap().len()); - } - - #[test] - fn try_map_maps_properly() { - let b1: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); - let b2: 
BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); - - assert_eq!(b1, b2.try_map::<_, (), _>(|(_, v)| Ok(v * 2)).unwrap()); - } - - #[test] - fn try_map_short_circuit() { - let b1: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); - - assert_eq!(Err("overflow"), b1.try_map(|(_, v)| v.checked_mul(100).ok_or("overflow"))); - } - - #[test] - fn try_map_ok() { - let b1: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); - let b2: BoundedBTreeMap> = - [1, 2, 3, 4].into_iter().map(|k| (k, (k as u16) * 100)).try_collect().unwrap(); - - assert_eq!(Ok(b2), b1.try_map(|(_, v)| (v as u16).checked_mul(100_u16).ok_or("overflow"))); - } } diff --git a/primitives/core/src/bounded/bounded_btree_set.rs b/primitives/runtime/src/bounded/bounded_btree_set.rs similarity index 99% rename from primitives/core/src/bounded/bounded_btree_set.rs rename to primitives/runtime/src/bounded/bounded_btree_set.rs index 5feac6b7150f0..c19d176f11bef 100644 --- a/primitives/core/src/bounded/bounded_btree_set.rs +++ b/primitives/runtime/src/bounded/bounded_btree_set.rs @@ -17,7 +17,7 @@ //! Traits, types and structs to support a bounded `BTreeSet`. 
-use crate::{Get, TryCollect}; +use crate::traits::{Get, TryCollect}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{borrow::Borrow, collections::btree_set::BTreeSet, marker::PhantomData, ops::Deref}; @@ -321,7 +321,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::ConstU32; + use crate::traits::ConstU32; fn set_from_keys(keys: &[T]) -> BTreeSet where diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs similarity index 90% rename from primitives/core/src/bounded/bounded_vec.rs rename to primitives/runtime/src/bounded/bounded_vec.rs index 2f39f3340ce50..10d9fc608c273 100644 --- a/primitives/core/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -19,7 +19,7 @@ //! or a double map. use super::WeakBoundedVec; -use crate::{Get, TryCollect}; +use crate::traits::{Get, TryCollect}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ ops::{Deref, Index, IndexMut, RangeBounds}, @@ -47,12 +47,6 @@ pub struct BoundedVec( #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData, ); -/// Create an object through truncation. -pub trait TruncateFrom { - /// Create an object through truncation. 
- fn truncate_from(unbound: T) -> Self; -} - #[cfg(feature = "std")] impl<'de, T, S: Get> Deserialize<'de> for BoundedVec where @@ -240,12 +234,12 @@ impl<'a, T: Ord, Bound: Get> Ord for BoundedSlice<'a, T, Bound> { } impl<'a, T, S: Get> TryFrom<&'a [T]> for BoundedSlice<'a, T, S> { - type Error = &'a [T]; + type Error = (); fn try_from(t: &'a [T]) -> Result { if t.len() <= S::get() as usize { Ok(BoundedSlice(t, PhantomData)) } else { - Err(t) + Err(()) } } } @@ -256,28 +250,12 @@ impl<'a, T, S> From> for &'a [T] { } } -impl<'a, T, S: Get> TruncateFrom<&'a [T]> for BoundedSlice<'a, T, S> { - fn truncate_from(unbound: &'a [T]) -> Self { - BoundedSlice::::truncate_from(unbound) - } -} - impl<'a, T, S> Clone for BoundedSlice<'a, T, S> { fn clone(&self) -> Self { BoundedSlice(self.0, PhantomData) } } -impl<'a, T, S> sp_std::fmt::Debug for BoundedSlice<'a, T, S> -where - &'a [T]: sp_std::fmt::Debug, - S: Get, -{ - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.debug_tuple("BoundedSlice").field(&self.0).field(&S::get()).finish() - } -} - // Since a reference `&T` is always `Copy`, so is `BoundedSlice<'a, T, S>`. impl<'a, T, S> Copy for BoundedSlice<'a, T, S> {} @@ -298,14 +276,6 @@ impl<'a, T, S> sp_std::iter::IntoIterator for BoundedSlice<'a, T, S> { } } -impl<'a, T, S: Get> BoundedSlice<'a, T, S> { - /// Create an instance from the first elements of the given slice (or all of it if it is smaller - /// than the length bound). - pub fn truncate_from(s: &'a [T]) -> Self { - Self(&s[0..(s.len().min(S::get() as usize))], PhantomData) - } -} - impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { let inner = Vec::::decode(input)?; @@ -349,17 +319,6 @@ impl BoundedVec { self.0.sort_by(compare) } - /// Exactly the same semantics as [`slice::sort_by_key`]. - /// - /// This is safe since sorting cannot change the number of elements in the vector. 
- pub fn sort_by_key(&mut self, f: F) - where - F: FnMut(&T) -> K, - K: sp_std::cmp::Ord, - { - self.0.sort_by_key(f) - } - /// Exactly the same semantics as [`slice::sort`]. /// /// This is safe since sorting cannot change the number of elements in the vector. @@ -650,12 +609,12 @@ impl> BoundedVec { /// # Panics /// /// Panics if `index > len`. - pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), T> { + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { if self.len() < Self::bound() { self.0.insert(index, element); Ok(()) } else { - Err(element) + Err(()) } } @@ -665,12 +624,12 @@ impl> BoundedVec { /// # Panics /// /// Panics if the new capacity exceeds isize::MAX bytes. - pub fn try_push(&mut self, element: T) -> Result<(), T> { + pub fn try_push(&mut self, element: T) -> Result<(), ()> { if self.len() < Self::bound() { self.0.push(element); Ok(()) } else { - Err(element) + Err(()) } } } @@ -703,23 +662,17 @@ where } impl> TryFrom> for BoundedVec { - type Error = Vec; + type Error = (); fn try_from(t: Vec) -> Result { if t.len() <= Self::bound() { // explicit check just above Ok(Self::unchecked_from(t)) } else { - Err(t) + Err(()) } } } -impl> TruncateFrom> for BoundedVec { - fn truncate_from(unbound: Vec) -> Self { - BoundedVec::::truncate_from(unbound) - } -} - // It is okay to give a non-mutable reference of the inner vec to anyone. 
impl AsRef> for BoundedVec { fn as_ref(&self) -> &Vec { @@ -837,12 +790,6 @@ where } } -impl<'a, T: PartialEq, S: Get> PartialEq<&'a [T]> for BoundedSlice<'a, T, S> { - fn eq(&self, other: &&'a [T]) -> bool { - &self.0 == other - } -} - impl> PartialEq> for BoundedVec { fn eq(&self, other: &Vec) -> bool { &self.0 == other @@ -900,7 +847,7 @@ where fn max_encoded_len() -> usize { // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // See: https://docs.substrate.io/reference/scale-codec/ + // https://docs.substrate.io/v3/advanced/scale-codec codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) @@ -926,17 +873,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::{bounded_vec, ConstU32}; - - #[test] - fn slice_truncate_from_works() { - let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4, 5]); - assert_eq!(bounded.deref(), &[1, 2, 3, 4]); - let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4]); - assert_eq!(bounded.deref(), &[1, 2, 3, 4]); - let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3]); - assert_eq!(bounded.deref(), &[1, 2, 3]); - } + use crate::{bounded_vec, traits::ConstU32}; #[test] fn slide_works() { @@ -1252,47 +1189,4 @@ pub mod test { b1.iter().map(|x| x + 1).rev().take(2).try_collect(); assert!(b2.is_err()); } - - #[test] - fn bounded_vec_debug_works() { - let bound = BoundedVec::>::truncate_from(vec![1, 2, 3]); - assert_eq!(format!("{:?}", bound), "BoundedVec([1, 2, 3], 5)"); - } - - #[test] - fn bounded_slice_debug_works() { - let bound = BoundedSlice::>::truncate_from(&[1, 2, 3]); - assert_eq!(format!("{:?}", bound), "BoundedSlice([1, 2, 3], 5)"); - } - - #[test] - fn bounded_vec_sort_by_key_works() { - let mut v: BoundedVec> = bounded_vec![-5, 4, 1, -3, 2]; - // Sort by absolute value. 
- v.sort_by_key(|k| k.abs()); - assert_eq!(v, vec![1, 2, -3, 4, -5]); - } - - #[test] - fn bounded_vec_truncate_from_works() { - let unbound = vec![1, 2, 3, 4, 5]; - let bound = BoundedVec::>::truncate_from(unbound.clone()); - assert_eq!(bound, vec![1, 2, 3]); - } - - #[test] - fn bounded_slice_truncate_from_works() { - let unbound = [1, 2, 3, 4, 5]; - let bound = BoundedSlice::>::truncate_from(&unbound); - assert_eq!(bound, &[1, 2, 3][..]); - } - - #[test] - fn bounded_slice_partialeq_slice_works() { - let unbound = [1, 2, 3]; - let bound = BoundedSlice::>::truncate_from(&unbound); - - assert_eq!(bound, &unbound[..]); - assert!(bound == &unbound[..]); - } } diff --git a/primitives/core/src/bounded/weak_bounded_vec.rs b/primitives/runtime/src/bounded/weak_bounded_vec.rs similarity index 99% rename from primitives/core/src/bounded/weak_bounded_vec.rs rename to primitives/runtime/src/bounded/weak_bounded_vec.rs index 5aff35f010c8b..ed9f4bba62b55 100644 --- a/primitives/core/src/bounded/weak_bounded_vec.rs +++ b/primitives/runtime/src/bounded/weak_bounded_vec.rs @@ -19,7 +19,7 @@ //! or a double map. 
use super::{BoundedSlice, BoundedVec}; -use crate::Get; +use crate::traits::Get; use codec::{Decode, Encode, MaxEncodedLen}; use core::{ ops::{Deref, Index, IndexMut}, @@ -443,7 +443,7 @@ where fn max_encoded_len() -> usize { // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // See: https://docs.substrate.io/reference/scale-codec/ + // https://docs.substrate.io/v3/advanced/scale-codec codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) @@ -453,7 +453,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::ConstU32; + use crate::traits::ConstU32; #[test] fn bound_returns_correct_value() { diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index c040b7cf517e0..99733dbbe9a55 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -170,7 +170,7 @@ fn test_calculate_for_fraction_times_denominator() { }; pub fn formal_calculate_for_fraction_times_denominator(n: u64, d: u64) -> u64 { - if n <= Perbill::from_parts(0_500_000_000) * d { + if n <= Perbill::from_parts(0_500_000_000) * d.clone() { n + d / 2 } else { (d as u128 * 2 - n as u128 * 2).try_into().unwrap() diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index fd7745c6031ff..5d6c657a68977 100644 --- a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -39,13 +39,12 @@ pub struct CheckedExtrinsic { pub function: Call, } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, + Call: Member + Dispatchable, Extra: SignedExtension, - RuntimeOrigin: From>, + Origin: From>, { type Call = Call; @@ -79,7 +78,7 @@ where U::pre_dispatch(&self.function)?; (None, None) }; - let res = 
self.function.dispatch(RuntimeOrigin::from(maybe_who)); + let res = self.function.dispatch(Origin::from(maybe_who)); let post_info = match res { Ok(info) => info, Err(err) => err.post_info, diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index b26545fb8404e..2ca50b12b2e1f 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -63,7 +63,7 @@ impl Era { /// does not exceed `BlockHashCount` parameter passed to `system` module, since that /// prunes old blocks and renders transactions immediately invalid. pub fn mortal(period: u64, current: u64) -> Self { - let period = period.checked_next_power_of_two().unwrap_or(1 << 16).clamp(4, 1 << 16); + let period = period.checked_next_power_of_two().unwrap_or(1 << 16).max(4).min(1 << 16); let phase = current % period; let quantize_factor = (period >> 12).max(1); let quantized_phase = phase / quantize_factor * quantize_factor; @@ -105,7 +105,7 @@ impl Encode for Era { Self::Immortal => output.push_byte(0), Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 | + let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; encoded.encode_to(output); }, diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 3752e31cbeeb0..bf77c08b76906 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -19,6 +19,10 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +// to allow benchmarking +#![cfg_attr(feature = "bench", feature(test))] +#[cfg(feature = "bench")] +extern crate test; #[doc(hidden)] pub use codec; @@ -32,8 +36,6 @@ pub use sp_std; #[doc(hidden)] pub use paste; -#[doc(hidden)] -pub use sp_arithmetic::traits::Saturating; #[doc(hidden)] pub use sp_application_crypto as app_crypto; @@ -53,6 +55,7 @@ use sp_std::prelude::*; use 
codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +pub mod bounded; pub mod curve; pub mod generic; pub mod legacy; @@ -67,6 +70,9 @@ pub mod transaction_validity; pub use crate::runtime_string::*; +// Re-export bounded types +pub use bounded::{BoundedBTreeMap, BoundedBTreeSet, BoundedSlice, BoundedVec, WeakBoundedVec}; + // Re-export Multiaddress pub use multiaddress::MultiAddress; @@ -76,13 +82,9 @@ pub use generic::{Digest, DigestItem}; pub use sp_application_crypto::{BoundToRuntimeAppPublic, RuntimeAppPublic}; /// Re-export this since it's part of the API of this crate. pub use sp_core::{ - bounded::{BoundedBTreeMap, BoundedBTreeSet, BoundedSlice, BoundedVec, WeakBoundedVec}, crypto::{key_types, AccountId32, CryptoType, CryptoTypeId, KeyTypeId}, TypeId, }; -/// Re-export bounded_vec and bounded_btree_map macros only when std is enabled. -#[cfg(feature = "std")] -pub use sp_core::{bounded_btree_map, bounded_vec}; /// Re-export `RuntimeDebug`, to avoid dependency clutter. pub use sp_core::RuntimeDebug; @@ -95,7 +97,7 @@ pub use sp_arithmetic::helpers_128bit; pub use sp_arithmetic::{ traits::SaturatedConversion, FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, - Rounding, UpperOf, + UpperOf, }; pub use either::Either; @@ -234,7 +236,7 @@ impl BuildStorage for () { /// Consensus engine unique ID. pub type ConsensusEngineId = [u8; 4]; -/// Signature verify that can work with any known signature types. +/// Signature verify that can work with any known signature types.. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Eq, PartialEq, Clone, Encode, Decode, MaxEncodedLen, RuntimeDebug, TypeInfo)] pub enum MultiSignature { @@ -547,12 +549,6 @@ pub enum DispatchError { /// The number of transactional layers has been reached, or we are not in a transactional /// layer. Transactional(TransactionalError), - /// Resources exhausted, e.g. 
attempt to read/write data which is too large to manipulate. - Exhausted, - /// The state is corrupt; this is generally not going to fix itself. - Corruption, - /// Some resource (e.g. a preimage) is unavailable right now. This might fix itself later. - Unavailable, } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about @@ -677,21 +673,18 @@ impl From<&'static str> for DispatchError { impl From for &'static str { fn from(err: DispatchError) -> &'static str { - use DispatchError::*; match err { - Other(msg) => msg, - CannotLookup => "Cannot lookup", - BadOrigin => "Bad origin", - Module(ModuleError { message, .. }) => message.unwrap_or("Unknown module error"), - ConsumerRemaining => "Consumer remaining", - NoProviders => "No providers", - TooManyConsumers => "Too many consumers", - Token(e) => e.into(), - Arithmetic(e) => e.into(), - Transactional(e) => e.into(), - Exhausted => "Resources exhausted", - Corruption => "State corrupt", - Unavailable => "Resource unavailable", + DispatchError::Other(msg) => msg, + DispatchError::CannotLookup => "Cannot lookup", + DispatchError::BadOrigin => "Bad origin", + DispatchError::Module(ModuleError { message, .. 
}) => + message.unwrap_or("Unknown module error"), + DispatchError::ConsumerRemaining => "Consumer remaining", + DispatchError::NoProviders => "No providers", + DispatchError::TooManyConsumers => "Too many consumers", + DispatchError::Token(e) => e.into(), + DispatchError::Arithmetic(e) => e.into(), + DispatchError::Transactional(e) => e.into(), } } } @@ -707,37 +700,33 @@ where impl traits::Printable for DispatchError { fn print(&self) { - use DispatchError::*; "DispatchError".print(); match self { - Other(err) => err.print(), - CannotLookup => "Cannot lookup".print(), - BadOrigin => "Bad origin".print(), - Module(ModuleError { index, error, message }) => { + Self::Other(err) => err.print(), + Self::CannotLookup => "Cannot lookup".print(), + Self::BadOrigin => "Bad origin".print(), + Self::Module(ModuleError { index, error, message }) => { index.print(); error.print(); if let Some(msg) = message { msg.print(); } }, - ConsumerRemaining => "Consumer remaining".print(), - NoProviders => "No providers".print(), - TooManyConsumers => "Too many consumers".print(), - Token(e) => { + Self::ConsumerRemaining => "Consumer remaining".print(), + Self::NoProviders => "No providers".print(), + Self::TooManyConsumers => "Too many consumers".print(), + Self::Token(e) => { "Token error: ".print(); <&'static str>::from(*e).print(); }, - Arithmetic(e) => { + Self::Arithmetic(e) => { "Arithmetic error: ".print(); <&'static str>::from(*e).print(); }, - Transactional(e) => { + Self::Transactional(e) => { "Transactional error: ".print(); <&'static str>::from(*e).print(); }, - Exhausted => "Resources exhausted".print(), - Corruption => "State corrupt".print(), - Unavailable => "Resource unavailable".print(), } } } @@ -836,8 +825,7 @@ pub fn verify_encoded_lazy( macro_rules! assert_eq_error_rate { ($x:expr, $y:expr, $error:expr $(,)?) 
=> { assert!( - ($x >= $crate::Saturating::saturating_sub($y, $error)) && - ($x <= $crate::Saturating::saturating_add($y, $error)), + ($x) >= (($y) - ($error)) && ($x) <= (($y) + ($error)), "{:?} != {:?} (with error rate {:?})", $x, $y, @@ -846,19 +834,42 @@ macro_rules! assert_eq_error_rate { }; } -/// Same as [`assert_eq_error_rate`], but intended to be used with floating point number, or -/// generally those who do not have over/underflow potentials. +/// Build a bounded vec from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. #[macro_export] #[cfg(feature = "std")] -macro_rules! assert_eq_error_rate_float { - ($x:expr, $y:expr, $error:expr $(,)?) => { - assert!( - ($x >= $y - $error) && ($x <= $y + $error), - "{:?} != {:?} (with error rate {:?})", - $x, - $y, - $error, - ); +macro_rules! bounded_vec { + ($ ($values:expr),* $(,)?) => { + { + $crate::sp_std::vec![$($values),*].try_into().unwrap() + } + }; + ( $value:expr ; $repetition:expr ) => { + { + $crate::sp_std::vec![$value ; $repetition].try_into().unwrap() + } + } +} + +/// Build a bounded btree-map from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_btree_map { + ($ ( $key:expr => $value:expr ),* $(,)?) 
=> { + { + $crate::traits::TryCollect::<$crate::BoundedBTreeMap<_, _, _>>::try_collect( + $crate::sp_std::vec![$(($key, $value)),*].into_iter() + ).unwrap() + } }; } @@ -1107,7 +1118,7 @@ mod tests { ext.insert(b"c".to_vec(), vec![3u8; 33]); ext.insert(b"d".to_vec(), vec![4u8; 33]); - let pre_root = *ext.backend.root(); + let pre_root = ext.backend.root().clone(); let (_, proof) = ext.execute_and_prove(|| { sp_io::storage::get(b"a"); sp_io::storage::get(b"b"); diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 47325743bd2f3..4ea9030745296 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -77,8 +77,8 @@ const STORAGE_LOCK_DEFAULT_EXPIRY_DURATION: Duration = Duration::from_millis(20_ const STORAGE_LOCK_DEFAULT_EXPIRY_BLOCKS: u32 = 4; /// Time between checks if the lock is still being held in milliseconds. -const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN: Duration = Duration::from_millis(10); -const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX: Duration = Duration::from_millis(100); +const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN: Duration = Duration::from_millis(100); +const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX: Duration = Duration::from_millis(10); /// Lockable item for use with a persisted storage lock. 
/// @@ -137,9 +137,10 @@ impl Lockable for Time { let remainder: Duration = now.diff(deadline); // do not snooze the full duration, but instead snooze max 100ms // it might get unlocked in another thread - let snooze = remainder.clamp( + use core::cmp::{max, min}; + let snooze = max( + min(remainder, STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX), STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN, - STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX, ); sp_io::offchain::sleep_until(now.add(snooze)); } @@ -238,9 +239,10 @@ impl Lockable for BlockAndTime { fn snooze(deadline: &Self::Deadline) { let now = offchain::timestamp(); let remainder: Duration = now.diff(&(deadline.timestamp)); - let snooze = remainder.clamp( + use core::cmp::{max, min}; + let snooze = max( + min(remainder, STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX), STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN, - STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX, ); sp_io::offchain::sleep_until(now.add(snooze)); } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index d16a37e6a2059..003fa62c9e088 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -358,15 +358,8 @@ where impl Applyable for TestXt where - Call: 'static - + Sized - + Send - + Sync - + Clone - + Eq - + Codec - + Debug - + Dispatchable, + Call: + 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, Extra: SignedExtension, Origin: From>, { diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 1c48b1933431d..a82ae1d62f56a 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -38,11 +38,6 @@ pub use sp_arithmetic::traits::{ }; use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId}; #[doc(hidden)] -pub use sp_core::{ - parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, - ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, -}; 
-#[doc(hidden)] pub use sp_std::marker::PhantomData; use sp_std::{self, fmt::Debug, prelude::*}; #[cfg(feature = "std")] @@ -281,6 +276,203 @@ where } } +/// A trait for querying a single value from a type defined in the trait. +/// +/// It is not required that the value is constant. +pub trait TypedGet { + /// The type which is returned. + type Type; + /// Return the current value. + fn get() -> Self::Type; +} + +/// A trait for querying a single value from a type. +/// +/// It is not required that the value is constant. +pub trait Get { + /// Return the current value. + fn get() -> T; +} + +impl Get for () { + fn get() -> T { + T::default() + } +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl Get for GetDefault { + fn get() -> T { + T::default() + } +} + +/// Try and collect into a collection `C`. +pub trait TryCollect { + /// The error type that gets returned when a collection can't be made from `self`. + type Error; + /// Consume self and try to collect the results into `C`. + /// + /// This is useful in preventing the undesirable `.collect().try_into()` call chain on + /// collections that need to be converted into a bounded type (e.g. `BoundedVec`). + fn try_collect(self) -> Result; +} + +macro_rules! 
impl_const_get { + ($name:ident, $t:ty) => { + #[doc = "Const getter for a basic type."] + #[derive($crate::RuntimeDebug)] + pub struct $name; + impl Get<$t> for $name { + fn get() -> $t { + T + } + } + impl Get> for $name { + fn get() -> Option<$t> { + Some(T) + } + } + impl TypedGet for $name { + type Type = $t; + fn get() -> $t { + T + } + } + }; +} + +impl_const_get!(ConstBool, bool); +impl_const_get!(ConstU8, u8); +impl_const_get!(ConstU16, u16); +impl_const_get!(ConstU32, u32); +impl_const_get!(ConstU64, u64); +impl_const_get!(ConstU128, u128); +impl_const_get!(ConstI8, i8); +impl_const_get!(ConstI16, i16); +impl_const_get!(ConstI32, i32); +impl_const_get!(ConstI64, i64); +impl_const_get!(ConstI128, i128); + +/// Create new implementations of the [`Get`](crate::traits::Get) trait. +/// +/// The so-called parameter type can be created in four different ways: +/// +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. +/// +/// - Declare the parameter type without `const` to have more freedom when creating the value. +/// +/// NOTE: A more substantial version of this macro is available in `frame_support` crate which +/// allows mutable and persistant variants. +/// +/// # Examples +/// +/// ``` +/// # use sp_runtime::traits::Get; +/// # use sp_runtime::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// const FIXED_VALUE: u64 = 10; +/// parameter_types! 
{ +/// pub const Argument: u64 = 42 + FIXED_VALUE; +/// /// Visibility of the type is optional +/// OtherArgument: u64 = non_const_expression(); +/// } +/// +/// trait Config { +/// type Parameter: Get; +/// type OtherParameter: Get; +/// } +/// +/// struct Runtime; +/// impl Config for Runtime { +/// type Parameter = Argument; +/// type OtherParameter = OtherArgument; +/// } +/// ``` +/// +/// # Invalid example: +/// +/// ```compile_fail +/// # use sp_runtime::traits::Get; +/// # use sp_runtime::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// parameter_types! { +/// pub const Argument: u64 = non_const_expression(); +/// } +/// ``` +#[macro_export] +macro_rules! parameter_types { + ( + $( #[ $attr:meta ] )* + $vis:vis const $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $( #[ $attr ] )* + $vis struct $name; + $crate::parameter_types!(@IMPL_CONST $name , $type , $value); + $crate::parameter_types!( $( $rest )* ); + ); + ( + $( #[ $attr:meta ] )* + $vis:vis $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $( #[ $attr ] )* + $vis struct $name; + $crate::parameter_types!(@IMPL $name, $type, $value); + $crate::parameter_types!( $( $rest )* ); + ); + () => (); + (@IMPL_CONST $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. + pub const fn get() -> $type { + $value + } + } + + impl> $crate::traits::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::traits::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; + (@IMPL $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. 
+ pub fn get() -> $type { + $value + } + } + + impl> $crate::traits::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::traits::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; +} + /// Extensible conversion trait. Generic over only source type, with destination type being /// associated. pub trait Morph { @@ -1018,7 +1210,7 @@ pub trait Dispatchable { /// Every function call from your runtime has an origin, which specifies where the extrinsic was /// generated from. In the case of a signed extrinsic (transaction), the origin contains an /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. - type RuntimeOrigin; + type Origin; /// ... type Config; /// An opaque set of information attached to the transaction. This could be constructed anywhere @@ -1029,8 +1221,7 @@ pub trait Dispatchable { /// with information about a `Dispatchable` that is ownly known post dispatch. type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; /// Actually dispatch this call and return the result of it. - fn dispatch(self, origin: Self::RuntimeOrigin) - -> crate::DispatchResultWithInfo; + fn dispatch(self, origin: Self::Origin) -> crate::DispatchResultWithInfo; } /// Shortcut to reference the `Info` type of a `Dispatchable`. 
@@ -1039,14 +1230,11 @@ pub type DispatchInfoOf = ::Info; pub type PostDispatchInfoOf = ::PostInfo; impl Dispatchable for () { - type RuntimeOrigin = (); + type Origin = (); type Config = (); type Info = (); type PostInfo = (); - fn dispatch( - self, - _origin: Self::RuntimeOrigin, - ) -> crate::DispatchResultWithInfo { + fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { panic!("This implementation should not be used for actual dispatch."); } } @@ -1824,12 +2012,6 @@ impl Printable for bool { } } -impl Printable for sp_weights::Weight { - fn print(&self) { - self.ref_time().print() - } -} - impl Printable for () { fn print(&self) { "()".print() diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 4646808b8c8e3..7cc8b70df9f96 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -226,8 +226,9 @@ impl From for TransactionValidity { /// Depending on the source we might apply different validation schemes. /// For instance we can disallow specific kinds of transactions if they were not produced /// by our local node (for instance off-chain workers). -#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] +#[derive( + Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf, +)] pub enum TransactionSource { /// Transaction is already included in block. 
/// diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 90b7df105ecde..6a83e20a94618 100644 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -12,10 +12,16 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasmi = { version = "0.9.1", default-features = false, features = ["core"] } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +wasmi = "0.9.0" + [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } log = { version = "0.4", default-features = false } -wasmi = { version = "0.13", default-features = false } +wasmi = { version = "0.9.0", optional = true } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-io = { version = "6.0.0", default-features = false, path = "../io" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -34,7 +40,7 @@ std = [ "sp-io/std", "sp-std/std", "sp-wasm-interface/std", - "wasmi/std", + "wasmi", ] strict = [] wasmer-sandbox = [] diff --git a/primitives/sandbox/src/embedded_executor.rs b/primitives/sandbox/src/embedded_executor.rs index 115c3192f3d89..4410e26c8d122 100644 --- a/primitives/sandbox/src/embedded_executor.rs +++ b/primitives/sandbox/src/embedded_executor.rs @@ -22,7 +22,7 @@ use alloc::string::String; use wasmi::{ memory_units::Pages, Externals, FuncInstance, FuncRef, GlobalDescriptor, GlobalRef, ImportResolver, MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, - RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, + RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind, }; use sp_std::{ @@ -113,7 +113,7 @@ impl<'a, T> Externals for GuestExternals<'a, T> { ReturnValue::Value(v) => Some(to_wasmi(v)), ReturnValue::Unit => None, }), - Err(HostError) => Err(Trap::host(DummyHostError)), + Err(HostError) 
=> Err(TrapKind::Host(Box::new(DummyHostError)).into()), } } } diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 585e4b4e0dc20..c81f1cd10a824 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -15,4 +15,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = "1.0.136" -serde_json = "1.0.85" +serde_json = "1.0.79" diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 703f0abe80458..5a3e97b4d5274 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -19,9 +19,8 @@ //! A crate which contains primitives that are useful for implementation that uses staking //! approaches in general. Definitions related to sessions, slashing, etc go here. - use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; +use sp_std::collections::btree_map::BTreeMap; pub mod offence; @@ -55,55 +54,25 @@ impl OnStakerSlash for () { } } -/// A struct that reflects stake that an account has in the staking system. Provides a set of -/// methods to operate on it's properties. Aimed at making `StakingInterface` more concise. -pub struct Stake { - /// The stash account whose balance is actually locked and at stake. - pub stash: T::AccountId, - /// The total stake that `stash` has in the staking system. This includes the - /// `active` stake, and any funds currently in the process of unbonding via - /// [`StakingInterface::unbond`]. - /// - /// # Note - /// - /// This is only guaranteed to reflect the amount locked by the staking system. If there are - /// non-staking locks on the bonded pair's balance this amount is going to be larger in - /// reality. - pub total: T::Balance, - /// The total amount of the stash's balance that will be at stake in any forthcoming - /// rounds. - pub active: T::Balance, -} - -/// A generic representation of a staking implementation. 
-/// -/// This interface uses the terminology of NPoS, but it is aims to be generic enough to cover other -/// implementations as well. +/// Trait for communication with the staking pallet. pub trait StakingInterface { /// Balance type used by the staking system. - type Balance: PartialEq; + type Balance; /// AccountId type used by the staking system type AccountId; - /// The minimum amount required to bond in order to set nomination intentions. This does not - /// necessarily mean the nomination will be counted in an election, but instead just enough to - /// be stored as a nominator. In other words, this is the minimum amount to register the - /// intention to nominate. - fn minimum_nominator_bond() -> Self::Balance; + /// The minimum amount required to bond in order to be a nominator. This does not necessarily + /// mean the nomination will be counted in an election, but instead just enough to be stored as + /// a nominator. In other words, this is the minimum amount to register the intention to + /// nominate. + fn minimum_bond() -> Self::Balance; - /// The minimum amount required to bond in order to set validation intentions. - fn minimum_validator_bond() -> Self::Balance; - - /// Return a stash account that is controlled by a `controller`. + /// Number of eras that staked funds must remain bonded for. /// - /// ## Note + /// # Note /// - /// The controller abstraction is not permanent and might go away. Avoid using this as much as - /// possible. - fn stash_by_ctrl(controller: &Self::AccountId) -> Result; - - /// Number of eras that staked funds must remain bonded for. + /// This must be strictly greater than the staking systems slash deffer duration. fn bonding_duration() -> EraIndex; /// The current era index. @@ -111,39 +80,41 @@ pub trait StakingInterface { /// This should be the latest planned era that the staking system knows about. fn current_era() -> EraIndex; - /// Returns the stake of `who`. 
- fn stake(who: &Self::AccountId) -> Result, DispatchError>; - - fn total_stake(who: &Self::AccountId) -> Result { - Self::stake(who).map(|s| s.total) - } - - fn active_stake(who: &Self::AccountId) -> Result { - Self::stake(who).map(|s| s.active) - } - - fn is_unbonding(who: &Self::AccountId) -> Result { - Self::stake(who).map(|s| s.active != s.total) - } - - fn fully_unbond(who: &Self::AccountId) -> DispatchResult { - Self::unbond(who, Self::stake(who)?.active) - } - - /// Bond (lock) `value` of `who`'s balance, while forwarding any rewards to `payee`. - fn bond(who: &Self::AccountId, value: Self::Balance, payee: &Self::AccountId) - -> DispatchResult; + /// The amount of active stake that `stash` has in the staking system. + fn active_stake(stash: &Self::AccountId) -> Option; - /// Have `who` nominate `validators`. - fn nominate(who: &Self::AccountId, validators: Vec) -> DispatchResult; - - /// Chill `who`. - fn chill(who: &Self::AccountId) -> DispatchResult; + /// The total stake that `stash` has in the staking system. This includes the + /// [`Self::active_stake`], and any funds currently in the process of unbonding via + /// [`Self::unbond`]. + /// + /// # Note + /// + /// This is only guaranteed to reflect the amount locked by the staking system. If there are + /// non-staking locks on the bonded pair's balance this may not be accurate. + fn total_stake(stash: &Self::AccountId) -> Option; - /// Bond some extra amount in `who`'s free balance against the active bonded balance of - /// the account. The amount extra actually bonded will never be more than `who`'s free + /// Bond (lock) `value` of `stash`'s balance. `controller` will be set as the account + /// controlling `stash`. This creates what is referred to as "bonded pair". + fn bond( + stash: Self::AccountId, + controller: Self::AccountId, + value: Self::Balance, + payee: Self::AccountId, + ) -> DispatchResult; + + /// Have `controller` nominate `validators`. 
+ fn nominate( + controller: Self::AccountId, + validators: sp_std::vec::Vec, + ) -> DispatchResult; + + /// Chill `stash`. + fn chill(controller: Self::AccountId) -> DispatchResult; + + /// Bond some extra amount in the _Stash_'s free balance against the active bonded balance of + /// the account. The amount extra actually bonded will never be more than the _Stash_'s free /// balance. - fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult; + fn bond_extra(stash: Self::AccountId, extra: Self::Balance) -> DispatchResult; /// Schedule a portion of the active bonded balance to be unlocked at era /// [Self::current_era] + [`Self::bonding_duration`]. @@ -154,7 +125,7 @@ pub trait StakingInterface { /// The amount of times this can be successfully called is limited based on how many distinct /// eras funds are schedule to unlock in. Calling [`Self::withdraw_unbonded`] after some unlock /// schedules have reached their unlocking era should allow more calls to this function. - fn unbond(stash: &Self::AccountId, value: Self::Balance) -> DispatchResult; + fn unbond(stash: Self::AccountId, value: Self::Balance) -> DispatchResult; /// Unlock any funds schedule to unlock before or at the current era. /// @@ -164,29 +135,7 @@ pub trait StakingInterface { num_slashing_spans: u32, ) -> Result; - /// The ideal number of active validators. - fn desired_validator_count() -> u32; - - /// Whether or not there is an ongoing election. - fn election_ongoing() -> bool; - - /// Force a current staker to become completely unstaked, immediately. - fn force_unstake(who: Self::AccountId) -> DispatchResult; - - /// Checks whether an account `staker` has been exposed in an era. - fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool; - /// Get the nominations of a stash, if they are a nominator, `None` otherwise. 
#[cfg(feature = "runtime-benchmarks")] - fn nominations(who: Self::AccountId) -> Option>; - - #[cfg(feature = "runtime-benchmarks")] - fn add_era_stakers( - current_era: &EraIndex, - stash: &Self::AccountId, - exposures: Vec<(Self::AccountId, Self::Balance)>, - ); - - #[cfg(feature = "runtime-benchmarks")] - fn set_current_era(era: EraIndex); + fn nominations(who: Self::AccountId) -> Option>; } diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index f6517b9e9028b..4261063993a52 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -108,10 +108,11 @@ pub trait Offence { } /// A slash fraction of the total exposure that should be slashed for this - /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. + /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. /// - /// `offenders_count` - the count of unique offending authorities for this `TimeSlot`. It is >0. - fn slash_fraction(&self, offenders_count: u32) -> Perbill; + /// `offenders_count` - the count of unique offending authorities. It is >0. + /// `validator_set_count` - the cardinality of the validator set at the time of offence. + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill; } /// Errors that may happen on offence reports. 
diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 860bca2a9de18..1c652966180a7 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hash-db = { version = "0.15.2", default-features = false } log = { version = "0.4.17", optional = true } num-traits = { version = "0.2.8", default-features = false } -parking_lot = { version = "0.12.1", optional = true } +parking_lot = { version = "0.12.0", optional = true } rand = { version = "0.7.2", optional = true } smallvec = "1.8.0" thiserror = { version = "1.0.30", optional = true } @@ -31,11 +31,10 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-trie = { version = "6.0.0", default-features = false, path = "../trie" } [dev-dependencies] -array-bytes = "4.1" +hex-literal = "0.3.4" pretty_assertions = "1.2.1" rand = "0.7.2" sp-runtime = { version = "6.0.0", path = "../runtime" } -trie-db = "0.24.0" assert_matches = "1.5" [features] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 791183c4d7e4d..505b53c800f9e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,11 +17,9 @@ //! State machine backends. These manage the code and storage of contracts. -#[cfg(feature = "std")] -use crate::trie_backend::TrieBackend; use crate::{ - trie_backend_essence::TrieBackendStorage, ChildStorageCollection, StorageCollection, - StorageKey, StorageValue, UsageInfo, + trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, + StorageCollection, StorageKey, StorageValue, UsageInfo, }; use codec::Encode; use hash_db::Hasher; @@ -48,7 +46,9 @@ pub trait Backend: sp_std::fmt::Debug { fn storage(&self, key: &[u8]) -> Result, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. 
- fn storage_hash(&self, key: &[u8]) -> Result, Self::Error>; + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.storage(key).map(|v| v.map(|v| H::hash(&v))) + } /// Get keyed child storage or None if there is nothing associated. fn child_storage( @@ -62,11 +62,13 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, Self::Error>; + ) -> Result, Self::Error> { + self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) + } /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage_hash(key)?.is_some()) + Ok(self.storage(key)?.is_some()) } /// true if a key exists in child storage. @@ -75,7 +77,7 @@ pub trait Backend: sp_std::fmt::Debug { child_info: &ChildInfo, key: &[u8], ) -> Result { - Ok(self.child_storage_hash(child_info, key)?.is_some()) + Ok(self.child_storage(child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. @@ -173,6 +175,10 @@ pub trait Backend: sp_std::fmt::Debug { all } + /// Try convert into trie backend. + fn as_trie_backend(&self) -> Option<&TrieBackend> { + None + } /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. @@ -267,16 +273,6 @@ pub trait Backend: sp_std::fmt::Debug { } } -/// Something that can be converted into a [`TrieBackend`]. -#[cfg(feature = "std")] -pub trait AsTrieBackend> { - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; - - /// Return the type as [`TrieBackend`]. - fn as_trie_backend(&self) -> &TrieBackend; -} - /// Trait that allows consolidate two transactions together. pub trait Consolidate { /// Consolidate two transactions into one. 
diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index fdc50e3f8f207..6efc847bfbdb7 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -17,13 +17,14 @@ //! Basic implementation for Externalities. -use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; +use crate::{Backend, StorageKey, StorageValue}; use codec::Encode; use hash_db::Hasher; use log::warn; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, TrackedStorageKey, + well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, StorageChild, + TrackedStorageKey, }, traits::Externalities, Blake2Hasher, @@ -34,19 +35,20 @@ use std::{ any::{Any, TypeId}, collections::BTreeMap, iter::FromIterator, + ops::Bound, }; /// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { - overlay: OverlayedChanges, + inner: Storage, extensions: Extensions, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new(inner: Storage) -> Self { - BasicExternalities { overlay: inner.into(), extensions: Default::default() } + BasicExternalities { inner, extensions: Default::default() } } /// New basic externalities with empty storage. 
@@ -55,34 +57,13 @@ impl BasicExternalities { } /// Insert key/value - pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.overlay.set_storage(k, Some(v)); + pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { + self.inner.top.insert(k, v) } /// Consume self and returns inner storages pub fn into_storages(self) -> Storage { - Storage { - top: self - .overlay - .changes() - .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) - .collect(), - children_default: self - .overlay - .children() - .map(|(iter, i)| { - ( - i.storage_key().to_vec(), - sp_core::storage::StorageChild { - data: iter - .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) - .collect(), - child_info: i.clone(), - }, - ) - }) - .collect(), - } + self.inner } /// Execute the given closure `f` with the externalities set and initialized with `storage`. @@ -92,7 +73,13 @@ impl BasicExternalities { storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, ) -> R { - let mut ext = Self::new(std::mem::take(storage)); + let mut ext = Self { + inner: Storage { + top: std::mem::take(&mut storage.top), + children_default: std::mem::take(&mut storage.children_default), + }, + extensions: Default::default(), + }; let r = ext.execute_with(f); @@ -121,26 +108,15 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == - other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && - self.overlay - .children() - .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) - .collect::>() == - other - .overlay - .children() - .map(|(iter, i)| { - (i, iter.map(|(k, v)| (k, v.value())).collect::>()) - }) - .collect::>() + self.inner.top.eq(&other.inner.top) && + self.inner.children_default.eq(&other.inner.children_default) } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { fn 
from_iter>(iter: I) -> Self { let mut t = Self::default(); - iter.into_iter().for_each(|(k, v)| t.insert(k, v)); + t.inner.top.extend(iter); t } } @@ -152,8 +128,11 @@ impl Default for BasicExternalities { } impl From> for BasicExternalities { - fn from(map: BTreeMap) -> Self { - Self::from_iter(map.into_iter()) + fn from(hashmap: BTreeMap) -> Self { + BasicExternalities { + inner: Storage { top: hashmap, children_default: Default::default() }, + extensions: Default::default(), + } } } @@ -161,7 +140,7 @@ impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} fn storage(&self, key: &[u8]) -> Option { - self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) + self.inner.top.get(key).cloned() } fn storage_hash(&self, key: &[u8]) -> Option> { @@ -169,7 +148,11 @@ impl Externalities for BasicExternalities { } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) + self.inner + .children_default + .get(child_info.storage_key()) + .and_then(|child| child.data.get(key)) + .cloned() } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { @@ -177,13 +160,16 @@ impl Externalities for BasicExternalities { } fn next_storage_key(&self, key: &[u8]) -> Option { - self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) + let range = (Bound::Excluded(key), Bound::Unbounded); + self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() } fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - self.overlay - .child_iter_after(child_info.storage_key(), key) - .find_map(|(k, v)| v.value().map(|_| k.to_vec())) + let range = (Bound::Excluded(key), Bound::Unbounded); + self.inner + .children_default + .get(child_info.storage_key()) + .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } fn 
place_storage(&mut self, key: StorageKey, maybe_value: Option) { @@ -192,7 +178,14 @@ impl Externalities for BasicExternalities { return } - self.overlay.set_storage(key, maybe_value) + match maybe_value { + Some(value) => { + self.inner.top.insert(key, value); + }, + None => { + self.inner.top.remove(&key); + }, + } } fn place_child_storage( @@ -201,7 +194,19 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - self.overlay.set_child_storage(child_info, key, value); + let child_map = self + .inner + .children_default + .entry(child_info.storage_key().to_vec()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }); + if let Some(value) = value { + child_map.data.insert(key, value); + } else { + child_map.data.remove(&key); + } } fn kill_child_storage( @@ -210,7 +215,12 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self.overlay.clear_child_storage(child_info); + let count = self + .inner + .children_default + .remove(child_info.storage_key()) + .map(|c| c.data.len()) + .unwrap_or(0) as u32; MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -229,7 +239,19 @@ impl Externalities for BasicExternalities { return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } - let count = self.overlay.clear_prefix(prefix); + let to_remove = self + .inner + .top + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + .map(|(k, _)| k) + .take_while(|k| k.starts_with(prefix)) + .cloned() + .collect::>(); + + let count = to_remove.len() as u32; + for key in to_remove { + self.inner.top.remove(&key); + } MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -240,37 +262,56 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let 
count = self.overlay.clear_child_prefix(child_info, prefix); - MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } + if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { + let to_remove = child + .data + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + .map(|(k, _)| k) + .take_while(|k| k.starts_with(prefix)) + .cloned() + .collect::>(); + + let count = to_remove.len() as u32; + for key in to_remove { + child.data.remove(&key); + } + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } + } else { + MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } + } } fn storage_append(&mut self, key: Vec, value: Vec) { - let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); - crate::ext::StorageAppend::new(current_value).append(value); + let current = self.inner.top.entry(key).or_default(); + crate::ext::StorageAppend::new(current).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { - let mut top = self - .overlay - .changes() - .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) - .collect::>(); + let mut top = self.inner.top.clone(); + let prefixed_keys: Vec<_> = self + .inner + .children_default + .iter() + .map(|(_k, v)| (v.child_info.prefixed_storage_key(), v.child_info.clone())) + .collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = empty_child_trie_root::>(); - for child_info in self.overlay.children().map(|d| d.1.clone()).collect::>() { + for (prefixed_storage_key, child_info) in prefixed_keys { let child_root = self.child_storage_root(&child_info, state_version); if empty_hash[..] == child_root[..] 
{ - top.remove(child_info.prefixed_storage_key().as_slice()); + top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(child_info.prefixed_storage_key().into_inner(), child_root); + top.insert(prefixed_storage_key.into_inner(), child_root); } } match state_version { - StateVersion::V0 => LayoutV0::::trie_root(top).as_ref().into(), - StateVersion::V1 => LayoutV1::::trie_root(top).as_ref().into(), + StateVersion::V0 => + LayoutV0::::trie_root(self.inner.top.clone()).as_ref().into(), + StateVersion::V1 => + LayoutV1::::trie_root(self.inner.top.clone()).as_ref().into(), } } @@ -279,11 +320,10 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { - let delta = - data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); + if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { + let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); crate::in_memory_backend::new_in_mem::>() - .child_storage_root(&child_info, delta, state_version) + .child_storage_root(&child.child_info, delta, state_version) .0 } else { empty_child_trie_root::>() @@ -292,15 +332,15 @@ impl Externalities for BasicExternalities { } fn storage_start_transaction(&mut self) { - self.overlay.start_transaction() + unimplemented!("Transactions are not supported by BasicExternalities"); } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - self.overlay.rollback_transaction().map_err(drop) + unimplemented!("Transactions are not supported by BasicExternalities"); } fn storage_commit_transaction(&mut self) -> Result<(), ()> { - self.overlay.commit_transaction().map_err(drop) + unimplemented!("Transactions are not supported by BasicExternalities"); } fn wipe(&mut self) {} @@ -356,6 +396,7 @@ impl sp_externalities::ExtensionStore for BasicExternalities { #[cfg(test)] mod tests { use 
super::*; + use hex_literal::hex; use sp_core::{ map, storage::{well_known_keys::CODE, Storage, StorageChild}, @@ -367,11 +408,10 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = array_bytes::hex2bytes_unchecked( - "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa", - ); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - assert_eq!(&ext.storage_root(StateVersion::default())[..], &root); + assert_eq!(&ext.storage_root(StateVersion::default())[..], &ROOT); } #[test] diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 06714fb41405a..6df23cdb7096e 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -19,7 +19,6 @@ use crate::{ backend::Backend, trie_backend::TrieBackend, StorageCollection, StorageKey, StorageValue, - TrieBackendBuilder, }; use codec::Codec; use hash_db::Hasher; @@ -47,7 +46,7 @@ where { let db = GenericMemoryDB::default(); // V1 is same as V0 for an empty trie. - TrieBackendBuilder::new(db, empty_trie_root::>()).build() + TrieBackend::new(db, empty_trie_root::>()) } impl TrieBackend, H> @@ -88,14 +87,14 @@ where pub fn update_backend(&self, root: H::Out, changes: GenericMemoryDB) -> Self { let mut clone = self.backend_storage().clone(); clone.consolidate(changes); - TrieBackendBuilder::new(clone, root).build() + Self::new(clone, root) } /// Apply the given transaction to this backend and set the root to the given value. 
pub fn apply_transaction(&mut self, root: H::Out, transaction: GenericMemoryDB) { let mut storage = sp_std::mem::take(self).into_storage(); storage.consolidate(transaction); - *self = TrieBackendBuilder::new(storage, root).build(); + *self = TrieBackend::new(storage, root); } /// Compare with another in-memory backend. @@ -110,7 +109,7 @@ where KF: KeyFunction + Send + Sync, { fn clone(&self) -> Self { - TrieBackendBuilder::new(self.backend_storage().clone(), *self.root()).build() + TrieBackend::new(self.backend_storage().clone(), *self.root()) } } @@ -157,8 +156,8 @@ where fn from((inners, state_version): (Storage, StateVersion)) -> Self { let mut inner: HashMap, BTreeMap> = inners .children_default - .into_values() - .map(|c| (Some(c.child_info), c.data)) + .into_iter() + .map(|(_k, c)| (Some(c.child_info), c.data)) .collect(); inner.insert(None, inners.top); (inner, state_version).into() @@ -204,7 +203,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::backend::{AsTrieBackend, Backend}; + use crate::backend::Backend; use sp_core::storage::StateVersion; use sp_runtime::traits::BlakeTwo256; @@ -219,7 +218,7 @@ mod tests { vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_version, ); - let trie_backend = storage.as_trie_backend(); + let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1f106593ede34..edc3db7a60e36 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -29,6 +29,8 @@ mod ext; mod in_memory_backend; pub(crate) mod overlayed_changes; #[cfg(feature = "std")] +mod proving_backend; +#[cfg(feature = "std")] mod read_only; mod stats; #[cfg(feature = "std")] @@ -132,7 
+134,7 @@ pub use crate::{ StorageTransactionCache, StorageValue, }, stats::{StateMachineStats, UsageInfo, UsageUnit}, - trie_backend::{TrieBackend, TrieBackendBuilder}, + trie_backend::TrieBackend, trie_backend_essence::{Storage, TrieBackendStorage}, }; @@ -142,9 +144,11 @@ mod std_reexport { basic::BasicExternalities, error::{Error, ExecutionError}, in_memory_backend::{new_in_mem, new_in_mem_hash_key}, + proving_backend::{ + create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + }, read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, - trie_backend::create_proof_check_backend, }; pub use sp_trie::{ trie_types::{TrieDBMutV0, TrieDBMutV1}, @@ -154,35 +158,39 @@ mod std_reexport { #[cfg(feature = "std")] mod execution { - use crate::backend::AsTrieBackend; - use super::*; - use codec::Codec; + use codec::{Codec, Decode, Encode}; use hash_db::Hasher; use smallvec::SmallVec; use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; use std::{ collections::{HashMap, HashSet}, fmt, + panic::UnwindSafe, + result, }; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions are protected from being closed by the runtime. qed"; - pub(crate) type CallResult = Result, E>; + pub(crate) type CallResult = Result, E>; /// Default handler of the execution manager. - pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; + pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; + /// Proving Trie backend with in-memory storage. + pub type InMemoryProvingBackend<'a, H> = ProvingBackend<'a, MemoryDB, H>; + /// Strategy for executing a call into the runtime. 
#[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { @@ -240,7 +248,9 @@ mod execution { impl ExecutionStrategy { /// Gets the corresponding manager for the execution strategy. - pub fn get_manager(self) -> ExecutionManager> { + pub fn get_manager( + self, + ) -> ExecutionManager> { match self { ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), @@ -260,19 +270,19 @@ mod execution { } /// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. - pub fn native_else_wasm() -> ExecutionManager> { + pub fn native_else_wasm() -> ExecutionManager> { ExecutionManager::NativeElseWasm } /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out /// the type. - fn always_wasm() -> ExecutionManager> { + fn always_wasm() -> ExecutionManager> { ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) } /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out /// the type. - fn always_untrusted_wasm() -> ExecutionManager> { + fn always_untrusted_wasm() -> ExecutionManager> { ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) } @@ -374,10 +384,23 @@ mod execution { pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { // We are not giving a native call and thus we are sure that the result can never be a // native value. 
- self.execute_using_consensus_failure_handler(strategy.get_manager()) + self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + ) + .map(NativeOrEncoded::into_encoded) } - fn execute_aux(&mut self, use_native: bool) -> (CallResult, bool) { + fn execute_aux( + &mut self, + use_native: bool, + native_call: Option, + ) -> (CallResult, bool) + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + { let mut cache = StorageTransactionCache::default(); let cache = match self.storage_transaction_cache.as_mut() { @@ -408,6 +431,7 @@ mod execution { self.method, self.call_data, use_native, + native_call, ); self.overlay @@ -425,20 +449,26 @@ mod execution { (result, was_native) } - fn execute_call_with_both_strategy( + fn execute_call_with_both_strategy( &mut self, + mut native_call: Option, on_consensus_failure: Handler, - ) -> CallResult + ) -> CallResult where - Handler: - FnOnce(CallResult, CallResult) -> CallResult, + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, { self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux(true); + let (result, was_native) = self.execute_aux(true, native_call.take()); if was_native { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux(false); + let (wasm_result, _) = self.execute_aux(false, native_call); if (result.is_ok() && wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || @@ -454,16 +484,25 @@ mod execution { } } - fn execute_call_with_native_else_wasm_strategy(&mut self) -> CallResult { + fn execute_call_with_native_else_wasm_strategy( + &mut self, + mut native_call: Option, + ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + { self.overlay.start_transaction(); - let 
(result, was_native) = self.execute_aux(true); + let (result, was_native) = self.execute_aux(true, native_call.take()); if !was_native || result.is_ok() { self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } else { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - self.execute_aux(false).0 + let (wasm_result, _) = self.execute_aux(false, native_call); + wasm_result } } @@ -476,29 +515,35 @@ mod execution { /// /// Returns the result of the executed function either in native representation `R` or /// in SCALE encoded representation. - pub fn execute_using_consensus_failure_handler( + pub fn execute_using_consensus_failure_handler( &mut self, manager: ExecutionManager, - ) -> Result, Box> + mut native_call: Option, + ) -> Result, Box> where - Handler: - FnOnce(CallResult, CallResult) -> CallResult, + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, { let result = { match manager { - ExecutionManager::Both(on_consensus_failure) => - self.execute_call_with_both_strategy(on_consensus_failure), + ExecutionManager::Both(on_consensus_failure) => self + .execute_call_with_both_strategy(native_call.take(), on_consensus_failure), ExecutionManager::NativeElseWasm => - self.execute_call_with_native_else_wasm_strategy(), + self.execute_call_with_native_else_wasm_strategy(native_call.take()), ExecutionManager::AlwaysWasm(trust_level) => { let _abort_guard = match trust_level { BackendTrustLevel::Trusted => None, BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), }; - self.execute_aux(false).0 + self.execute_aux(false, native_call).0 }, - ExecutionManager::NativeWhenPossible => self.execute_aux(true).0, + ExecutionManager::NativeWhenPossible => self.execute_aux(true, native_call).0, } }; @@ -517,13 +562,15 @@ mod execution { runtime_code: &RuntimeCode, ) -> Result<(Vec, StorageProof), Box> where - B: 
AsTrieBackend, + B: Backend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_execution_on_trie_backend::<_, _, _, _>( trie_backend, overlay, @@ -560,10 +607,8 @@ mod execution { Exec: CodeExecutor + 'static + Clone, Spawn: SpawnNamed + Send + 'static, { - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); - - let result = StateMachine::<_, H, Exec>::new( + let proving_backend = proving_backend::ProvingBackend::new(trie_backend); + let mut sm = StateMachine::<_, H, Exec>::new( &proving_backend, overlay, exec, @@ -572,14 +617,14 @@ mod execution { Extensions::default(), runtime_code, spawn_handle, - ) - .execute_using_consensus_failure_handler::<_>(always_wasm())?; - - let proof = proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + ); - Ok((result, proof)) + let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )?; + let proof = sm.backend.extract_proof(); + Ok((result.into_encoded(), proof)) } /// Check execution proof, generated by `prove_execution` call. 
@@ -594,7 +639,7 @@ mod execution { runtime_code: &RuntimeCode, ) -> Result, Box> where - H: Hasher + 'static, + H: Hasher, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, Spawn: SpawnNamed + Send + 'static, @@ -627,7 +672,7 @@ mod execution { Exec: CodeExecutor + Clone + 'static, Spawn: SpawnNamed + Send + 'static, { - StateMachine::<_, H, Exec>::new( + let mut sm = StateMachine::<_, H, Exec>::new( trie_backend, overlay, exec, @@ -636,20 +681,27 @@ mod execution { Extensions::default(), runtime_code, spawn_handle, + ); + + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_untrusted_wasm(), + None, ) - .execute_using_consensus_failure_handler(always_untrusted_wasm()) + .map(NativeOrEncoded::into_encoded) } /// Generate storage read proof. pub fn prove_read(backend: B, keys: I) -> Result> where - B: AsTrieBackend, + B: Backend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_read_on_trie_backend(trie_backend, keys) } @@ -777,11 +829,13 @@ mod execution { start_at: &[Vec], ) -> Result<(StorageProof, u32), Box> where - B: AsTrieBackend, + B: Backend, H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at) } @@ -802,9 +856,7 @@ mod execution { return Err(Box::new("Invalid start of range.")) } - let recorder = sp_trie::recorder::Recorder::default(); - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); + let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); let mut count = 0; let mut child_roots = 
HashSet::new(); @@ -872,7 +924,7 @@ mod execution { // do not add two child trie with same root true } - } else if recorder.estimate_encoded_size() <= size_limit { + } else if proving_backend.estimate_encoded_size() <= size_limit { count += 1; true } else { @@ -896,11 +948,7 @@ mod execution { start_at = None; } } - - let proof = proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed"); - Ok((proof, count)) + Ok((proving_backend.extract_proof(), count)) } /// Generate range storage read proof. @@ -912,11 +960,13 @@ mod execution { start_at: Option<&[u8]>, ) -> Result<(StorageProof, u32), Box> where - B: AsTrieBackend, + B: Backend, H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_range_read_with_size_on_trie_backend( trie_backend, child_info, @@ -939,9 +989,7 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let recorder = sp_trie::recorder::Recorder::default(); - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); + let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); let mut count = 0; proving_backend .apply_to_key_values_while( @@ -949,7 +997,7 @@ mod execution { prefix, start_at, |_key, _value| { - if count == 0 || recorder.estimate_encoded_size() <= size_limit { + if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { count += 1; true } else { @@ -959,11 +1007,7 @@ mod execution { false, ) .map_err(|e| Box::new(e) as Box)?; - - let proof = proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed"); - Ok((proof, count)) + Ok((proving_backend.extract_proof(), count)) } /// Generate child storage read proof. 
@@ -973,13 +1017,15 @@ mod execution { keys: I, ) -> Result> where - B: AsTrieBackend, + B: Backend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_child_read_on_trie_backend(trie_backend, child_info, keys) } @@ -995,17 +1041,13 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - - Ok(proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed")) + Ok(proving_backend.extract_proof()) } /// Generate storage read proof on pre-created trie backend. @@ -1021,17 +1063,13 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = - TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - - Ok(proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed")) + Ok(proving_backend.extract_proof()) } /// Check storage read proof, generated by `prove_read` call. 
@@ -1041,7 +1079,7 @@ mod execution { keys: I, ) -> Result, Option>>, Box> where - H: Hasher + 'static, + H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, @@ -1066,7 +1104,7 @@ mod execution { start_at: &[Vec], ) -> Result<(KeyValueStates, usize), Box> where - H: Hasher + 'static, + H: Hasher, H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; @@ -1083,7 +1121,7 @@ mod execution { start_at: Option<&[u8]>, ) -> Result<(Vec<(Vec, Vec)>, bool), Box> where - H: Hasher + 'static, + H: Hasher, H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; @@ -1104,7 +1142,7 @@ mod execution { keys: I, ) -> Result, Option>>, Box> where - H: Hasher + 'static, + H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, @@ -1308,19 +1346,23 @@ mod execution { #[cfg(test)] mod tests { - use super::{backend::AsTrieBackend, ext::Ext, *}; + use super::{ext::Ext, *}; use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; use assert_matches::assert_matches; - use codec::Encode; + use codec::{Decode, Encode}; use sp_core::{ map, storage::{ChildInfo, StateVersion}, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; - use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1}; - use std::collections::{BTreeMap, HashMap}; + use std::{ + collections::{BTreeMap, HashMap}, + panic::UnwindSafe, + result, + }; #[derive(Clone)] struct DummyCodeExecutor { @@ -1332,20 +1374,28 @@ mod tests { impl CodeExecutor for DummyCodeExecutor { type Error = u8; - fn call( + fn call< + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result> + UnwindSafe, + >( &self, ext: &mut dyn Externalities, _: &RuntimeCode, _method: &str, _data: &[u8], use_native: bool, - ) -> (CallResult, bool) { + native_call: Option, + ) -> (CallResult, bool) { let using_native = use_native && 
self.native_available; - match (using_native, self.native_succeeds, self.fallback_succeeds) { - (true, true, _) | (false, _, true) => ( - Ok(vec![ + match (using_native, self.native_succeeds, self.fallback_succeeds, native_call) { + (true, true, _, Some(call)) => { + let res = sp_externalities::set_and_run_with_externalities(ext, call); + (res.map(NativeOrEncoded::Native).map_err(|_| 0), true) + }, + (true, true, _, None) | (false, _, true, None) => ( + Ok(NativeOrEncoded::Encoded(vec![ ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], - ]), + ])), using_native, ), _ => (Err(0), using_native), @@ -1369,7 +1419,7 @@ mod tests { execute_works_inner(StateVersion::V1); } fn execute_works_inner(state_version: StateVersion) { - let backend = trie_backend::tests::test_trie(state_version, None, None); + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1397,7 +1447,7 @@ mod tests { execute_works_with_native_else_wasm_inner(StateVersion::V1); } fn execute_works_with_native_else_wasm_inner(state_version: StateVersion) { - let backend = trie_backend::tests::test_trie(state_version, None, None); + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1426,7 +1476,7 @@ mod tests { } fn dual_execution_strategy_detects_consensus_failure_inner(state_version: StateVersion) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(state_version, None, None); + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1446,10 +1496,13 @@ mod tests { ); assert!(state_machine - .execute_using_consensus_failure_handler(ExecutionManager::Both(|we, _ne| { - consensus_failed = true; - we - }),) + .execute_using_consensus_failure_handler::<_, 
NeverNativeValue, fn() -> _>( + ExecutionManager::Both(|we, _ne| { + consensus_failed = true; + we + }), + None, + ) .is_err()); assert!(consensus_failed); } @@ -1467,7 +1520,7 @@ mod tests { }; // fetch execution proof from 'remote' full node - let mut remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let mut remote_backend = trie_backend::tests::test_trie(state_version); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let (remote_result, remote_proof) = prove_execution( &mut remote_backend, @@ -1507,7 +1560,7 @@ mod tests { b"bbb".to_vec() => b"3".to_vec() ]; let state = InMemoryBackend::::from((initial, StateVersion::default())); - let backend = state.as_trie_backend(); + let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); @@ -1663,7 +1716,7 @@ mod tests { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend(); + let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); @@ -1679,7 +1732,7 @@ mod tests { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend(); + let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { @@ -1716,7 +1769,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend(); + let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); // For 
example, block initialization with event. @@ -1787,7 +1840,7 @@ mod tests { let child_info = &child_info; let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1804,7 +1857,7 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1871,8 +1924,8 @@ mod tests { let trie: InMemoryBackend = (storage.clone(), StateVersion::default()).into(); - let trie_root = *trie.root(); - let backend = TrieBackendBuilder::wrap(&trie).with_recorder(Default::default()).build(); + let trie_root = trie.root(); + let backend = crate::ProvingBackend::new(&trie); let mut queries = Vec::new(); for c in 0..(5 + nb_child_trie / 2) { // random existing query @@ -1917,10 +1970,10 @@ mod tests { } } - let storage_proof = backend.extract_proof().expect("Failed to extract proof"); + let storage_proof = backend.extract_proof(); let remote_proof = test_compact(storage_proof, &trie_root); let proof_check = - create_proof_check_backend::(trie_root, remote_proof).unwrap(); + create_proof_check_backend::(*trie_root, remote_proof).unwrap(); for (child_info, key, expected) in queries { assert_eq!( @@ -1934,18 +1987,18 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { let state_version = 
StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let remote_root = remote_backend.storage_root(::std::iter::empty(), state_version).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Always contains at least some nodes. - assert_eq!(proof.to_memory_db::().drain().len(), 3); + assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); - assert_eq!(proof.to_memory_db::().drain().len(), 9); + assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); assert_eq!(count, 85); let (results, completed) = read_range_proof_check::( remote_root, @@ -1965,10 +2018,10 @@ mod tests { assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); - assert_eq!(proof.to_memory_db::().drain().len(), 11); + assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); assert_eq!(count, 132); let (results, completed) = read_range_proof_check::(remote_root, proof, None, None, None, None) @@ -1982,7 +2035,7 @@ mod tests { let mut state_version = StateVersion::V0; let (mut mdb, mut root) = trie_backend::tests::test_db(state_version); { - let mut trie = TrieDBMutBuilderV0::from_existing(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutV0::from_existing(&mut mdb, &mut root).unwrap(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); 
trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash @@ -1992,7 +2045,7 @@ mod tests { } let check_proof = |mdb, root, state_version| -> StorageProof { - let remote_backend = TrieBackendBuilder::new(mdb, root).build(); + let remote_backend = TrieBackend::new(mdb, root); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally @@ -2016,7 +2069,7 @@ mod tests { // do switch state_version = StateVersion::V1; { - let mut trie = TrieDBMutBuilderV1::from_existing(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutV1::from_existing(&mut mdb, &mut root).unwrap(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); // update with same value do change @@ -2035,10 +2088,10 @@ mod tests { #[test] fn prove_range_with_child_works() { let state_version = StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); - let trie_backend = remote_backend.as_trie_backend(); + let trie_backend = remote_backend.as_trie_backend().unwrap(); let max_iter = 1000; let mut nb_loop = 0; loop { @@ -2053,7 +2106,7 @@ mod tests { ) .unwrap(); // Always contains at least some nodes. - assert!(proof.to_memory_db::().drain().len() > 0); + assert!(proof.clone().into_memory_db::().drain().len() > 0); assert!(count < 3); // when doing child we include parent and first child key. 
let (result, completed_depth) = read_range_proof_check_with_child::( @@ -2085,7 +2138,7 @@ mod tests { let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let remote_backend = trie_backend::tests::test_trie(state_version, None, None); + let remote_backend = trie_backend::tests::test_trie(state_version); let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), @@ -2117,9 +2170,9 @@ mod tests { .into_iter(), state_version, ); - let mut remote_storage = remote_backend.backend_storage().clone(); + let mut remote_storage = remote_backend.into_storage(); remote_storage.consolidate(transaction); - let remote_backend = TrieBackendBuilder::new(remote_storage, remote_root).build(); + let remote_backend = TrieBackend::new(remote_storage, remote_root); let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -2145,7 +2198,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(state_version, None, None); + let backend = test_trie(state_version); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); @@ -2171,7 +2224,7 @@ mod tests { b"bbb".to_vec() => b"".to_vec() ]; let state = InMemoryBackend::::from((initial, StateVersion::default())); - let backend = state.as_trie_backend(); + let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); @@ -2193,4 +2246,51 @@ mod tests { overlay.commit_transaction().unwrap(); assert_eq!(overlay.storage(b"ccc"), Some(None)); } + + #[test] + fn 
runtime_registered_extensions_are_removed_after_execution() { + let state_version = StateVersion::default(); + use sp_externalities::ExternalitiesExt; + sp_externalities::decl_extension! { + struct DummyExt(u32); + } + + let backend = trie_backend::tests::test_trie(state_version); + let mut overlayed_changes = Default::default(); + let wasm_code = RuntimeCode::empty(); + + let mut state_machine = StateMachine::new( + &backend, + &mut overlayed_changes, + &DummyCodeExecutor { + native_available: true, + native_succeeds: true, + fallback_succeeds: false, + }, + "test", + &[], + Default::default(), + &wasm_code, + TaskExecutor::new(), + ); + + let run_state_machine = |state_machine: &mut StateMachine<_, _, _>| { + state_machine + .execute_using_consensus_failure_handler:: _, _, _>( + ExecutionManager::NativeWhenPossible, + Some(|| { + sp_externalities::with_externalities(|mut ext| { + ext.register_extension(DummyExt(2)).unwrap(); + }) + .unwrap(); + + Ok(()) + }), + ) + .unwrap(); + }; + + run_state_machine(&mut state_machine); + run_state_machine(&mut state_machine); + } } diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index e5dad7157c731..ae5d47f6bb943 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -57,7 +57,7 @@ pub struct NotInRuntime; /// Describes in which mode the node is currently executing. #[derive(Debug, Clone, Copy)] pub enum ExecutionMode { - /// Executing in client mode: Removal of all transactions possible. + /// Exeuting in client mode: Removal of all transactions possible. Client, /// Executing in runtime mode: Transactions started by the client are protected. Runtime, @@ -95,7 +95,7 @@ pub type OverlayedChangeSet = OverlayedMap>; /// Holds a set of changes with the ability modify them using nested transactions. 
#[derive(Debug, Clone)] -pub struct OverlayedMap { +pub struct OverlayedMap { /// Stores the changes that this overlay constitutes. changes: BTreeMap>, /// Stores which keys are dirty per transaction. Needed in order to determine which @@ -110,7 +110,7 @@ pub struct OverlayedMap { execution_mode: ExecutionMode, } -impl Default for OverlayedMap { +impl Default for OverlayedMap { fn default() -> Self { Self { changes: BTreeMap::new(), @@ -121,31 +121,6 @@ impl Default for OverlayedMap { } } -#[cfg(feature = "std")] -impl From for OverlayedMap> { - fn from(storage: sp_core::storage::StorageMap) -> Self { - Self { - changes: storage - .into_iter() - .map(|(k, v)| { - ( - k, - OverlayedEntry { - transactions: SmallVec::from_iter([InnerValue { - value: Some(v), - extrinsics: Default::default(), - }]), - }, - ) - }) - .collect(), - dirty_keys: Default::default(), - num_client_transactions: 0, - execution_mode: ExecutionMode::Client, - } - } -} - impl Default for ExecutionMode { fn default() -> Self { Self::Client diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 9eb26d52f79f8..746599a6768e6 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -474,7 +474,7 @@ impl OverlayedChanges { pub fn children( &self, ) -> impl Iterator, &ChildInfo)> { - self.children.values().map(|v| (v.0.changes(), &v.1)) + self.children.iter().map(|(_, v)| (v.0.changes(), &v.1)) } /// Get an iterator over all top changes as been by the current transaction. 
@@ -638,21 +638,6 @@ impl OverlayedChanges { } } -#[cfg(feature = "std")] -impl From for OverlayedChanges { - fn from(storage: sp_core::storage::Storage) -> Self { - Self { - top: storage.top.into(), - children: storage - .children_default - .into_iter() - .map(|(k, v)| (k, (v.data.into(), v.child_info))) - .collect(), - ..Default::default() - } - } -} - #[cfg(feature = "std")] fn retain_map(map: &mut Map, f: F) where @@ -743,6 +728,7 @@ impl<'a> OverlayedExtensions<'a> { mod tests { use super::*; use crate::{ext::Ext, InMemoryBackend}; + use hex_literal::hex; use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; @@ -869,11 +855,10 @@ mod tests { let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); - let root = array_bytes::hex2bytes_unchecked( - "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa", - ); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - assert_eq!(&ext.storage_root(state_version)[..], &root); + assert_eq!(&ext.storage_root(state_version)[..], &ROOT); } #[test] diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs new file mode 100644 index 0000000000000..f5cf542908b10 --- /dev/null +++ b/primitives/state-machine/src/proving_backend.rs @@ -0,0 +1,611 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Proving state machine backend. + +use crate::{ + trie_backend::TrieBackend, + trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, + Backend, DBValue, Error, ExecutionError, +}; +use codec::{Codec, Decode, Encode}; +use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; +use log::debug; +use parking_lot::RwLock; +use sp_core::storage::{ChildInfo, StateVersion}; +pub use sp_trie::trie_types::TrieError; +use sp_trie::{ + empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, + LayoutV1, MemoryDB, Recorder, StorageProof, +}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; + +/// Patricia trie-based backend specialized in get value proofs. +pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { + pub(crate) backend: &'a TrieBackendEssence, + pub(crate) proof_recorder: &'a mut Recorder, +} + +impl<'a, S, H> ProvingBackendRecorder<'a, S, H> +where + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, +{ + /// Produce proof for a key query. + pub fn storage(&mut self, key: &[u8]) -> Result>, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let map_e = |e| format!("Trie lookup error: {}", e); + + // V1 is equivalent to V0 on read. + read_trie_value_with::, _, Ephemeral>( + &eph, + self.backend.root(), + key, + &mut *self.proof_recorder, + ) + .map_err(map_e) + } + + /// Produce proof for a child key query. + pub fn child_storage( + &mut self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, String> { + let storage_key = child_info.storage_key(); + let root = self + .storage(storage_key)? 
+ .and_then(|r| Decode::decode(&mut &r[..]).ok()) + // V1 is equivalent to V0 on empty trie + .unwrap_or_else(empty_child_trie_root::>); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let map_e = |e| format!("Trie lookup error: {}", e); + + // V1 is equivalent to V0 on read + read_child_trie_value_with::, _, _>( + child_info.keyspace(), + &eph, + root.as_ref(), + key, + &mut *self.proof_recorder, + ) + .map_err(map_e) + } + + /// Produce proof for the whole backend. + pub fn record_all_keys(&mut self) { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let mut iter = move || -> Result<(), Box>> { + let root = self.backend.root(); + // V1 and V is equivalent to V0 on read and recorder is key read. + record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) + }; + + if let Err(e) = iter() { + debug!(target: "trie", "Error while recording all keys: {}", e); + } + } +} + +#[derive(Default)] +struct ProofRecorderInner { + /// All the records that we have stored so far. + records: HashMap>, + /// The encoded size of all recorded values. + encoded_size: usize, +} + +/// Global proof recorder, act as a layer over a hash db for recording queried data. +#[derive(Clone, Default)] +pub struct ProofRecorder { + inner: Arc>>, +} + +impl ProofRecorder { + /// Record the given `key` => `val` combination. + pub fn record(&self, key: Hash, val: Option) { + let mut inner = self.inner.write(); + let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { + let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); + + entry.insert(val); + encoded_size + } else { + 0 + }; + + inner.encoded_size += encoded_size; + } + + /// Returns the value at the given `key`. 
+ pub fn get(&self, key: &Hash) -> Option> { + self.inner.read().records.get(key).cloned() + } + + /// Returns the estimated encoded size of the proof. + /// + /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual + /// encoded proof. + pub fn estimate_encoded_size(&self) -> usize { + let inner = self.inner.read(); + inner.encoded_size + codec::Compact(inner.records.len() as u32).encoded_size() + } + + /// Convert into a [`StorageProof`]. + pub fn to_storage_proof(&self) -> StorageProof { + StorageProof::new( + self.inner + .read() + .records + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())), + ) + } + + /// Reset the internal state. + pub fn reset(&self) { + let mut inner = self.inner.write(); + inner.records.clear(); + inner.encoded_size = 0; + } +} + +/// Patricia trie-based backend which also tracks all touched storage trie values. +/// These can be sent to remote node and used as a proof of execution. +pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher>( + TrieBackend, H>, +); + +/// Trie backend storage with its proof recorder. +pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { + backend: &'a S, + proof_recorder: ProofRecorder, +} + +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> +where + H::Out: Codec, +{ + /// Create new proving backend. + pub fn new(backend: &'a TrieBackend) -> Self { + let proof_recorder = Default::default(); + Self::new_with_recorder(backend, proof_recorder) + } + + /// Create new proving backend with the given recorder. + pub fn new_with_recorder( + backend: &'a TrieBackend, + proof_recorder: ProofRecorder, + ) -> Self { + let essence = backend.essence(); + let root = *essence.root(); + let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder }; + ProvingBackend(TrieBackend::new(recorder, root)) + } + + /// Extracting the gathered unordered proof. 
+ pub fn extract_proof(&self) -> StorageProof { + self.0.essence().backend_storage().proof_recorder.to_storage_proof() + } + + /// Returns the estimated encoded size of the proof. + /// + /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual + /// encoded proof. + pub fn estimate_encoded_size(&self) -> usize { + self.0.essence().backend_storage().proof_recorder.estimate_encoded_size() + } + + /// Clear the proof recorded data. + pub fn clear_recorder(&self) { + self.0.essence().backend_storage().proof_recorder.reset() + } +} + +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage + for ProofRecorderBackend<'a, S, H> +{ + type Overlay = S::Overlay; + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + if let Some(v) = self.proof_recorder.get(key) { + return Ok(v) + } + + let backend_value = self.backend.get(key, prefix)?; + self.proof_recorder.record(*key, backend_value.clone()); + Ok(backend_value) + } +} + +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug + for ProvingBackend<'a, S, H> +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ProvingBackend") + } +} + +impl<'a, S, H> Backend for ProvingBackend<'a, S, H> +where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + H::Out: Ord + Codec, +{ + type Error = String; + type Transaction = S::Overlay; + type TrieBackendStorage = S; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.0.storage(key) + } + + fn child_storage( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.0.child_storage(child_info, key) + } + + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.0.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: 
Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + ) { + self.0.apply_to_keys_while(child_info, prefix, start_at, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.0.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.0.next_child_storage_key(child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.0.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.0.for_key_values_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + child_info: &ChildInfo, + prefix: &[u8], + f: F, + ) { + self.0.for_child_keys_with_prefix(child_info, prefix, f) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.0.pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.0.keys(prefix) + } + + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { + self.0.child_keys(child_info, prefix) + } + + fn storage_root<'b>( + &self, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { + self.0.storage_root(delta, state_version) + } + + fn child_storage_root<'b>( + &self, + child_info: &ChildInfo, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { + self.0.child_storage_root(child_info, delta, state_version) + } + + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} + + fn usage_info(&self) -> crate::stats::UsageInfo { + self.0.usage_info() + } +} + +/// Create a backend used for checking the proof., using `H` as hasher. +/// +/// `proof` and `root` must match, i.e. `root` must be the correct root of `proof` nodes. 
+pub fn create_proof_check_backend( + root: H::Out, + proof: StorageProof, +) -> Result, H>, Box> +where + H: Hasher, + H::Out: Codec, +{ + let db = proof.into_memory_db(); + + if db.contains(&root, EMPTY_PREFIX) { + Ok(TrieBackend::new(db, root)) + } else { + Err(Box::new(ExecutionError::InvalidProof)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + proving_backend::create_proof_check_backend, trie_backend::tests::test_trie, + InMemoryBackend, + }; + use sp_core::H256; + use sp_runtime::traits::BlakeTwo256; + use sp_trie::PrefixedMemoryDB; + + fn test_proving( + trie_backend: &TrieBackend, BlakeTwo256>, + ) -> ProvingBackend, BlakeTwo256> { + ProvingBackend::new(trie_backend) + } + + #[test] + fn proof_is_empty_until_value_is_read() { + proof_is_empty_until_value_is_read_inner(StateVersion::V0); + proof_is_empty_until_value_is_read_inner(StateVersion::V1); + } + fn proof_is_empty_until_value_is_read_inner(test_hash: StateVersion) { + let trie_backend = test_trie(test_hash); + assert!(test_proving(&trie_backend).extract_proof().is_empty()); + } + + #[test] + fn proof_is_non_empty_after_value_is_read() { + proof_is_non_empty_after_value_is_read_inner(StateVersion::V0); + proof_is_non_empty_after_value_is_read_inner(StateVersion::V1); + } + fn proof_is_non_empty_after_value_is_read_inner(test_hash: StateVersion) { + let trie_backend = test_trie(test_hash); + let backend = test_proving(&trie_backend); + assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + assert!(!backend.extract_proof().is_empty()); + } + + #[test] + fn proof_is_invalid_when_does_not_contains_root() { + let result = create_proof_check_backend::( + H256::from_low_u64_be(1), + StorageProof::empty(), + ); + assert!(result.is_err()); + } + + #[test] + fn passes_through_backend_calls() { + passes_through_backend_calls_inner(StateVersion::V0); + passes_through_backend_calls_inner(StateVersion::V1); + } + fn passes_through_backend_calls_inner(state_version: 
StateVersion) { + let trie_backend = test_trie(state_version); + let proving_backend = test_proving(&trie_backend); + assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); + assert_eq!(trie_backend.pairs(), proving_backend.pairs()); + + let (trie_root, mut trie_mdb) = + trie_backend.storage_root(std::iter::empty(), state_version); + let (proving_root, mut proving_mdb) = + proving_backend.storage_root(std::iter::empty(), state_version); + assert_eq!(trie_root, proving_root); + assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + } + + #[test] + fn proof_recorded_and_checked_top() { + proof_recorded_and_checked_inner(StateVersion::V0); + proof_recorded_and_checked_inner(StateVersion::V1); + } + fn proof_recorded_and_checked_inner(state_version: StateVersion) { + let size_content = 34; // above hashable value treshold. + let value_range = 0..64; + let contents = value_range + .clone() + .map(|i| (vec![i], Some(vec![i; size_content]))) + .collect::>(); + let in_memory = InMemoryBackend::::default(); + let in_memory = in_memory.update(vec![(None, contents)], state_version); + let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; + value_range.clone().for_each(|i| { + assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) + }); + + let trie = in_memory.as_trie_backend().unwrap(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + assert_eq!(in_memory_root, trie_root); + value_range + .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + + let proving = ProvingBackend::new(trie); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); + + let proof = proving.extract_proof(); + + let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); + } + + #[test] + fn proof_recorded_and_checked_with_child() { 
+ proof_recorded_and_checked_with_child_inner(StateVersion::V0); + proof_recorded_and_checked_with_child_inner(StateVersion::V1); + } + fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) { + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_2 = ChildInfo::new_default(b"sub2"); + let child_info_1 = &child_info_1; + let child_info_2 = &child_info_2; + let contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), + ]; + let in_memory = InMemoryBackend::::default(); + let in_memory = in_memory.update(contents, state_version); + let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_version, + ) + .0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + (28..65).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) + }); + (10..15).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) + }); + + let trie = in_memory.as_trie_backend().unwrap(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + assert_eq!(in_memory_root, trie_root); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + + let proving = ProvingBackend::new(trie); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + + let proof = proving.extract_proof(); + + let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); + assert!(proof_check.storage(&[0]).is_err()); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + // note that it is include in root because proof 
close + assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); + assert_eq!(proof_check.storage(&[64]).unwrap(), None); + + let proving = ProvingBackend::new(trie); + assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); + + let proof = proving.extract_proof(); + let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); + assert_eq!(proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64]); + } + + #[test] + fn storage_proof_encoded_size_estimation_works() { + storage_proof_encoded_size_estimation_works_inner(StateVersion::V0); + storage_proof_encoded_size_estimation_works_inner(StateVersion::V1); + } + fn storage_proof_encoded_size_estimation_works_inner(state_version: StateVersion) { + let trie_backend = test_trie(state_version); + let backend = test_proving(&trie_backend); + + let check_estimation = + |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { + let storage_proof = backend.extract_proof(); + let estimation = + backend.0.essence().backend_storage().proof_recorder.estimate_encoded_size(); + + assert_eq!(storage_proof.encoded_size(), estimation); + }; + + assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + check_estimation(&backend); + + assert_eq!(backend.storage(b"value1").unwrap(), Some(vec![42])); + check_estimation(&backend); + + assert_eq!(backend.storage(b"value2").unwrap(), Some(vec![24])); + check_estimation(&backend); + + assert!(backend.storage(b"doesnotexist").unwrap().is_none()); + check_estimation(&backend); + + assert!(backend.storage(b"doesnotexist2").unwrap().is_none()); + check_estimation(&backend); + } + + #[test] + fn proof_recorded_for_same_execution_should_be_deterministic() { + let storage_changes = vec![ + (H256::random(), Some(b"value1".to_vec())), + (H256::random(), Some(b"value2".to_vec())), + (H256::random(), Some(b"value3".to_vec())), + (H256::random(), Some(b"value4".to_vec())), + (H256::random(), 
Some(b"value5".to_vec())), + (H256::random(), Some(b"value6".to_vec())), + (H256::random(), Some(b"value7".to_vec())), + (H256::random(), Some(b"value8".to_vec())), + ]; + + let proof_recorder = + ProofRecorder:: { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) }; + storage_changes + .clone() + .into_iter() + .for_each(|(key, val)| proof_recorder.record(key, val)); + let proof1 = proof_recorder.to_storage_proof(); + + let proof_recorder = + ProofRecorder:: { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) }; + storage_changes + .into_iter() + .for_each(|(key, val)| proof_recorder.record(key, val)); + let proof2 = proof_recorder.to_storage_proof(); + + assert_eq!(proof1, proof2); + } +} diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index e58fb760f4d7e..622915a2d0525 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -23,6 +23,7 @@ use hash_db::Hasher; use sp_core::{ storage::{ChildInfo, StateVersion, TrackedStorageKey}, traits::Externalities, + Blake2Hasher, }; use sp_externalities::MultiRemovalResults; use std::{ @@ -43,10 +44,7 @@ pub trait InspectState> { fn inspect_state R, R>(&self, f: F) -> R; } -impl> InspectState for B -where - H::Out: Encode, -{ +impl> InspectState for B { fn inspect_state R, R>(&self, f: F) -> R { ReadOnlyExternalities::from(self).execute_with(f) } @@ -68,10 +66,7 @@ impl<'a, H: Hasher, B: 'a + Backend> From<&'a B> for ReadOnlyExternalities<'a } } -impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> -where - H::Out: Encode, -{ +impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> { /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure. 
@@ -80,10 +75,7 @@ where } } -impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities<'a, H, B> -where - H::Out: Encode, -{ +impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities<'a, H, B> { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) { panic!("Should not be used in read-only externalities!") } @@ -95,10 +87,7 @@ where } fn storage_hash(&self, key: &[u8]) -> Option> { - self.backend - .storage_hash(key) - .expect("Backed failed for storage_hash in ReadOnlyExternalities") - .map(|h| h.encode()) + self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { @@ -108,10 +97,7 @@ where } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { - self.backend - .child_storage_hash(child_info, key) - .expect("Backed failed for child_storage_hash in ReadOnlyExternalities") - .map(|h| h.encode()) + self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } fn next_storage_key(&self, key: &[u8]) -> Option { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index e94d34b5560cd..57d4f0b4898eb 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -24,7 +24,7 @@ use std::{ use crate::{ backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, - StorageTransactionCache, StorageValue, TrieBackendBuilder, + StorageTransactionCache, StorageValue, }; use hash_db::Hasher; @@ -41,9 +41,8 @@ use sp_externalities::{Extension, ExtensionStore, Extensions}; use sp_trie::StorageProof; /// Simple HashMap-based Externalities impl. -pub struct TestExternalities +pub struct TestExternalities where - H: Hasher + 'static, H::Out: codec::Codec + Ord, { /// The overlay changed storage. 
@@ -59,9 +58,8 @@ where pub state_version: StateVersion, } -impl TestExternalities +impl TestExternalities where - H: Hasher + 'static, H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. @@ -204,9 +202,7 @@ where /// This implementation will wipe the proof recorded in between calls. Consecutive calls will /// get their own proof from scratch. pub fn execute_and_prove(&mut self, execute: impl FnOnce() -> R) -> (R, StorageProof) { - let proving_backend = TrieBackendBuilder::wrap(&self.backend) - .with_recorder(Default::default()) - .build(); + let proving_backend = crate::InMemoryProvingBackend::new(&self.backend); let mut proving_ext = Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, @@ -215,7 +211,7 @@ where ); let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); - let proof = proving_backend.extract_proof().expect("Failed to extract storage proof"); + let proof = proving_backend.extract_proof(); (outcome, proof) } @@ -334,6 +330,7 @@ where #[cfg(test)] mod tests { use super::*; + use hex_literal::hex; use sp_core::{storage::ChildInfo, traits::Externalities, H256}; use sp_runtime::traits::BlakeTwo256; @@ -345,9 +342,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = array_bytes::hex_n_into_unchecked::( - "ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489", - ); + let root = + H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); assert_eq!(H256::from_slice(ext.storage_root(Default::default()).as_slice()), root); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index da4250b6ba3e1..130b4bf178202 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -17,182 +17,31 @@ //! 
Trie-based state machine backend. -#[cfg(feature = "std")] -use crate::backend::AsTrieBackend; use crate::{ trie_backend_essence::{TrieBackendEssence, TrieBackendStorage}, Backend, StorageKey, StorageValue, }; use codec::Codec; -#[cfg(feature = "std")] -use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; use sp_std::vec::Vec; -#[cfg(feature = "std")] -use sp_trie::{cache::LocalTrieCache, recorder::Recorder}; -#[cfg(feature = "std")] -use sp_trie::{MemoryDB, StorageProof}; -/// Dummy type to be used in `no_std`. -/// -/// This is required to have the type available for [`TrieBackendBuilder`] and [`TrieBackend`]. -#[cfg(not(feature = "std"))] -pub struct LocalTrieCache(sp_std::marker::PhantomData); - -/// Special trait to support taking the [`LocalTrieCache`] by value or by reference. -/// -/// This trait is internal use only and to emphasize this, the trait is sealed. -pub trait AsLocalTrieCache: sealed::Sealed { - /// Returns `self` as [`LocalTrieCache`]. - #[cfg(feature = "std")] - fn as_local_trie_cache(&self) -> &LocalTrieCache; -} - -impl AsLocalTrieCache for LocalTrieCache { - #[cfg(feature = "std")] - fn as_local_trie_cache(&self) -> &LocalTrieCache { - self - } -} - -#[cfg(feature = "std")] -impl AsLocalTrieCache for &LocalTrieCache { - fn as_local_trie_cache(&self) -> &LocalTrieCache { - self - } -} - -/// Special module that contains the `Sealed` trait. -mod sealed { - use super::*; - - /// A special trait which prevents externals to implement the [`AsLocalTrieCache`] outside - /// of this crate. - pub trait Sealed {} - - impl Sealed for LocalTrieCache {} - impl Sealed for &LocalTrieCache {} -} - -/// Builder for creating a [`TrieBackend`]. -pub struct TrieBackendBuilder, H: Hasher, C = LocalTrieCache> { - storage: S, - root: H::Out, - #[cfg(feature = "std")] - recorder: Option>, - cache: Option, +/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
+pub struct TrieBackend, H: Hasher> { + pub(crate) essence: TrieBackendEssence, } -impl TrieBackendBuilder> +impl, H: Hasher> TrieBackend where - S: TrieBackendStorage, - H: Hasher, + H::Out: Codec, { - /// Create a new builder instance. + /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: None, - } - } -} - -impl TrieBackendBuilder -where - S: TrieBackendStorage, - H: Hasher, -{ - /// Wrap the given [`TrieBackend`]. - /// - /// This can be used for example if all accesses to the trie should - /// be recorded while some other functionality still uses the non-recording - /// backend. - /// - /// The backend storage and the cache will be taken from `other`. - pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> { - TrieBackendBuilder { - storage: other.essence.backend_storage(), - root: *other.essence.root(), - #[cfg(feature = "std")] - recorder: None, - #[cfg(feature = "std")] - cache: other.essence.trie_node_cache.as_ref(), - #[cfg(not(feature = "std"))] - cache: None, - } + TrieBackend { essence: TrieBackendEssence::new(storage, root) } } - /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_optional_recorder(self, recorder: Option>) -> Self { - Self { recorder, ..self } - } - - /// Use the given `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_recorder(self, recorder: Recorder) -> Self { - Self { recorder: Some(recorder), ..self } - } - - /// Use the given optional `cache` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { - TrieBackendBuilder { - cache, - root: self.root, - storage: self.storage, - recorder: self.recorder, - } - } - - /// Use the given `cache` for the to be configured [`TrieBackend`]. 
- #[cfg(feature = "std")] - pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { - TrieBackendBuilder { - cache: Some(cache), - root: self.root, - storage: self.storage, - recorder: self.recorder, - } - } - - /// Build the configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn build(self) -> TrieBackend { - TrieBackend { - essence: TrieBackendEssence::new_with_cache_and_recorder( - self.storage, - self.root, - self.cache, - self.recorder, - ), - } - } - - /// Build the configured [`TrieBackend`]. - #[cfg(not(feature = "std"))] - pub fn build(self) -> TrieBackend { - let _ = self.cache; - - TrieBackend { essence: TrieBackendEssence::new(self.storage, self.root) } - } -} - -/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher, C = LocalTrieCache> { - pub(crate) essence: TrieBackendEssence, -} - -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> TrieBackend -where - H::Out: Codec, -{ /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } @@ -210,26 +59,15 @@ where pub fn into_storage(self) -> S { self.essence.into_storage() } - - /// Extract the [`StorageProof`]. - /// - /// This only returns `Some` when there was a recorder set. 
- #[cfg(feature = "std")] - pub fn extract_proof(mut self) -> Option { - self.essence.recorder.take().map(|r| r.drain_storage_proof()) - } } -impl, H: Hasher, C: AsLocalTrieCache> sp_std::fmt::Debug - for TrieBackend -{ +impl, H: Hasher> sp_std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> Backend - for TrieBackend +impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { @@ -237,22 +75,10 @@ where type Transaction = S::Overlay; type TrieBackendStorage = S; - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.storage_hash(key) - } - fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.child_storage_hash(child_info, key) - } - fn child_storage( &self, child_info: &ChildInfo, @@ -343,6 +169,10 @@ where self.essence.child_storage_root(child_info, delta, state_version) } + fn as_trie_backend(&self) -> Option<&TrieBackend> { + Some(self) + } + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::UsageInfo { @@ -354,97 +184,20 @@ where } } -#[cfg(feature = "std")] -impl, H: Hasher, C> AsTrieBackend for TrieBackend { - type TrieBackendStorage = S; - - fn as_trie_backend(&self) -> &TrieBackend { - self - } -} - -/// Create a backend used for checking the proof, using `H` as hasher. -/// -/// `proof` and `root` must match, i.e. `root` must be the correct root of `proof` nodes. 
-#[cfg(feature = "std")] -pub fn create_proof_check_backend( - root: H::Out, - proof: StorageProof, -) -> Result, H>, Box> -where - H: Hasher, - H::Out: Codec, -{ - let db = proof.into_memory_db(); - - if db.contains(&root, hash_db::EMPTY_PREFIX) { - Ok(TrieBackendBuilder::new(db, root).build()) - } else { - Err(Box::new(crate::ExecutionError::InvalidProof)) - } -} - #[cfg(test)] pub mod tests { - use crate::{new_in_mem, InMemoryBackend}; - use super::*; use codec::Encode; use sp_core::H256; use sp_runtime::traits::BlakeTwo256; use sp_trie::{ - cache::{CacheSize, SharedTrieCache}, - trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1}, - KeySpacedDBMut, PrefixedKey, PrefixedMemoryDB, Trie, TrieCache, TrieMut, + trie_types::{TrieDB, TrieDBMutV0, TrieDBMutV1}, + KeySpacedDBMut, PrefixedMemoryDB, Trie, TrieMut, }; use std::{collections::HashSet, iter}; - use trie_db::NodeCodec; const CHILD_KEY_1: &[u8] = b"sub1"; - type Recorder = sp_trie::recorder::Recorder; - type Cache = LocalTrieCache; - type SharedCache = SharedTrieCache; - - macro_rules! 
parameterized_test { - ($name:ident, $internal_name:ident) => { - #[test] - fn $name() { - let parameters = vec![ - (StateVersion::V0, None, None), - (StateVersion::V0, Some(SharedCache::new(CacheSize::Unlimited)), None), - (StateVersion::V0, None, Some(Recorder::default())), - ( - StateVersion::V0, - Some(SharedCache::new(CacheSize::Unlimited)), - Some(Recorder::default()), - ), - (StateVersion::V1, None, None), - (StateVersion::V1, Some(SharedCache::new(CacheSize::Unlimited)), None), - (StateVersion::V1, None, Some(Recorder::default())), - ( - StateVersion::V1, - Some(SharedCache::new(CacheSize::Unlimited)), - Some(Recorder::default()), - ), - ]; - - for (version, cache, recorder) in parameters { - eprintln!( - "Running with version {:?}, cache enabled {} and recorder enabled {}", - version, - cache.is_some(), - recorder.is_some() - ); - - let cache = cache.as_ref().map(|c| c.local_cache()); - - $internal_name(version, cache, recorder.clone()); - } - } - }; - } - pub(crate) fn test_db(state_version: StateVersion) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); @@ -453,12 +206,12 @@ pub mod tests { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); match state_version { StateVersion::V0 => { - let mut trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutV0::new(&mut mdb, &mut root); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); }, StateVersion::V1 => { - let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + let mut trie = TrieDBMutV1::new(&mut mdb, &mut root); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); }, @@ -487,11 +240,11 @@ pub mod tests { match state_version { StateVersion::V0 => { - let trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); + let trie = 
TrieDBMutV0::new(&mut mdb, &mut root); build(trie, &child_info, &sub_root[..]) }, StateVersion::V1 => { - let trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + let trie = TrieDBMutV1::new(&mut mdb, &mut root); build(trie, &child_info, &sub_root[..]) }, }; @@ -501,39 +254,27 @@ pub mod tests { pub(crate) fn test_trie( hashed_value: StateVersion, - cache: Option, - recorder: Option, ) -> TrieBackend, BlakeTwo256> { let (mdb, root) = test_db(hashed_value); - - TrieBackendBuilder::new(mdb, root) - .with_optional_cache(cache) - .with_optional_recorder(recorder) - .build() + TrieBackend::new(mdb, root) } - parameterized_test!(read_from_storage_returns_some, read_from_storage_returns_some_inner); - fn read_from_storage_returns_some_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - assert_eq!( - test_trie(state_version, cache, recorder).storage(b"key").unwrap(), - Some(b"value".to_vec()) - ); + #[test] + fn read_from_storage_returns_some() { + read_from_storage_returns_some_inner(StateVersion::V0); + read_from_storage_returns_some_inner(StateVersion::V1); + } + fn read_from_storage_returns_some_inner(state_version: StateVersion) { + assert_eq!(test_trie(state_version).storage(b"key").unwrap(), Some(b"value".to_vec())); } - parameterized_test!( - read_from_child_storage_returns_some, - read_from_child_storage_returns_some_inner - ); - fn read_from_child_storage_returns_some_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let test_trie = test_trie(state_version, cache, recorder); + #[test] + fn read_from_child_storage_returns_some() { + read_from_child_storage_returns_some_inner(StateVersion::V0); + read_from_child_storage_returns_some_inner(StateVersion::V1); + } + fn read_from_child_storage_returns_some_inner(state_version: StateVersion) { + let test_trie = test_trie(state_version); assert_eq!( test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") @@ -558,81 +299,65 @@ pub 
mod tests { ); } - parameterized_test!(read_from_storage_returns_none, read_from_storage_returns_none_inner); - fn read_from_storage_returns_none_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - assert_eq!( - test_trie(state_version, cache, recorder).storage(b"non-existing-key").unwrap(), - None - ); + #[test] + fn read_from_storage_returns_none() { + read_from_storage_returns_none_inner(StateVersion::V0); + read_from_storage_returns_none_inner(StateVersion::V1); + } + fn read_from_storage_returns_none_inner(state_version: StateVersion) { + assert_eq!(test_trie(state_version).storage(b"non-existing-key").unwrap(), None); } - parameterized_test!( - pairs_are_not_empty_on_non_empty_storage, - pairs_are_not_empty_on_non_empty_storage_inner - ); - fn pairs_are_not_empty_on_non_empty_storage_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - assert!(!test_trie(state_version, cache, recorder).pairs().is_empty()); + #[test] + fn pairs_are_not_empty_on_non_empty_storage() { + pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V0); + pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V1); + } + fn pairs_are_not_empty_on_non_empty_storage_inner(state_version: StateVersion) { + assert!(!test_trie(state_version).pairs().is_empty()); } #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackendBuilder::, BlakeTwo256>::new( + assert!(TrieBackend::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), ) - .build() .pairs() .is_empty()); } - parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner); - fn storage_root_is_non_default_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { + #[test] + fn storage_root_is_non_default() { + storage_root_is_non_default_inner(StateVersion::V0); + storage_root_is_non_default_inner(StateVersion::V1); + } + fn storage_root_is_non_default_inner(state_version: StateVersion) { assert!( 
- test_trie(state_version, cache, recorder) - .storage_root(iter::empty(), state_version) - .0 != H256::repeat_byte(0) + test_trie(state_version).storage_root(iter::empty(), state_version).0 != + H256::repeat_byte(0) ); } - parameterized_test!( - storage_root_transaction_is_non_empty, - storage_root_transaction_is_non_empty_inner - ); - fn storage_root_transaction_is_non_empty_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let (new_root, mut tx) = test_trie(state_version, cache, recorder) + #[test] + fn storage_root_transaction_is_non_empty() { + storage_root_transaction_is_non_empty_inner(StateVersion::V0); + storage_root_transaction_is_non_empty_inner(StateVersion::V1); + } + fn storage_root_transaction_is_non_empty_inner(state_version: StateVersion) { + let (new_root, mut tx) = test_trie(state_version) .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); - assert!( - new_root != - test_trie(state_version, None, None) - .storage_root(iter::empty(), state_version) - .0 - ); + assert!(new_root != test_trie(state_version).storage_root(iter::empty(), state_version).0); } - parameterized_test!(prefix_walking_works, prefix_walking_works_inner); - fn prefix_walking_works_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let trie = test_trie(state_version, cache, recorder); + #[test] + fn prefix_walking_works() { + prefix_walking_works_inner(StateVersion::V0); + prefix_walking_works_inner(StateVersion::V1); + } + fn prefix_walking_works_inner(state_version: StateVersion) { + let trie = test_trie(state_version); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { @@ -646,566 +371,23 @@ pub mod tests { assert_eq!(seen, expected); } - parameterized_test!( - keys_with_empty_prefix_returns_all_keys, - keys_with_empty_prefix_returns_all_keys_inner - ); - fn keys_with_empty_prefix_returns_all_keys_inner( - state_version: 
StateVersion, - cache: Option, - recorder: Option, - ) { + #[test] + fn keys_with_empty_prefix_returns_all_keys() { + keys_with_empty_prefix_returns_all_keys_inner(StateVersion::V0); + keys_with_empty_prefix_returns_all_keys_inner(StateVersion::V1); + } + fn keys_with_empty_prefix_returns_all_keys_inner(state_version: StateVersion) { let (test_db, test_root) = test_db(state_version); - let expected = TrieDBBuilder::new(&test_db, &test_root) - .build() + let expected = TrieDB::new(&test_db, &test_root) + .unwrap() .iter() .unwrap() .map(|d| d.unwrap().0.to_vec()) .collect::>(); - let trie = test_trie(state_version, cache, recorder); + let trie = test_trie(state_version); let keys = trie.keys(&[]); assert_eq!(expected, keys); } - - parameterized_test!( - proof_is_empty_until_value_is_read, - proof_is_empty_until_value_is_read_inner - ); - fn proof_is_empty_until_value_is_read_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let trie_backend = test_trie(state_version, cache, recorder); - assert!(TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build() - .extract_proof() - .unwrap() - .is_empty()); - } - - parameterized_test!( - proof_is_non_empty_after_value_is_read, - proof_is_non_empty_after_value_is_read_inner - ); - fn proof_is_non_empty_after_value_is_read_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let trie_backend = test_trie(state_version, cache, recorder); - let backend = TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build(); - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().unwrap().is_empty()); - } - - #[test] - fn proof_is_invalid_when_does_not_contains_root() { - let result = create_proof_check_backend::( - H256::from_low_u64_be(1), - StorageProof::empty(), - ); - assert!(result.is_err()); - } - - parameterized_test!(passes_through_backend_calls, 
passes_through_backend_calls_inner); - fn passes_through_backend_calls_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let trie_backend = test_trie(state_version, cache, recorder); - let proving_backend = TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build(); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = - trie_backend.storage_root(std::iter::empty(), state_version); - let (proving_root, mut proving_mdb) = - proving_backend.storage_root(std::iter::empty(), state_version); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - } - - #[test] - fn proof_recorded_and_checked_top() { - proof_recorded_and_checked_inner(StateVersion::V0); - proof_recorded_and_checked_inner(StateVersion::V1); - } - fn proof_recorded_and_checked_inner(state_version: StateVersion) { - let size_content = 34; // above hashable value threshold. - let value_range = 0..64; - let contents = value_range - .clone() - .map(|i| (vec![i], Some(vec![i; size_content]))) - .collect::>(); - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_version); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; - value_range.clone().for_each(|i| { - assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) - }); - - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - value_range - .clone() - .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { - // Run multiple times to have a different cache conditions. 
- for i in 0..5 { - if let Some(cache) = &cache { - if i == 2 { - cache.reset_node_cache(); - } else if i == 3 { - cache.reset_value_cache(); - } - } - - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - - let proof = proving.extract_proof().unwrap(); - - let proof_check = - create_proof_check_backend::(in_memory_root.into(), proof) - .unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - } - } - } - - #[test] - fn proof_record_works_with_iter() { - proof_record_works_with_iter_inner(StateVersion::V0); - proof_record_works_with_iter_inner(StateVersion::V1); - } - fn proof_record_works_with_iter_inner(state_version: StateVersion) { - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { - // Run multiple times to have a different cache conditions. 
- for i in 0..5 { - if let Some(cache) = &cache { - if i == 2 { - cache.reset_node_cache(); - } else if i == 3 { - cache.reset_value_cache(); - } - } - - let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_version); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; - (0..64) - .for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); - - (0..63).for_each(|i| { - assert_eq!(proving.next_storage_key(&[i]).unwrap(), Some(vec![i + 1])) - }); - - let proof = proving.extract_proof().unwrap(); - - let proof_check = - create_proof_check_backend::(in_memory_root.into(), proof) - .unwrap(); - (0..63).for_each(|i| { - assert_eq!(proof_check.next_storage_key(&[i]).unwrap(), Some(vec![i + 1])) - }); - } - } - } - - #[test] - fn proof_recorded_and_checked_with_child() { - proof_recorded_and_checked_with_child_inner(StateVersion::V0); - proof_recorded_and_checked_with_child_inner(StateVersion::V1); - } - fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) { - let child_info_1 = ChildInfo::new_default(b"sub1"); - let child_info_2 = ChildInfo::new_default(b"sub2"); - let child_info_1 = &child_info_1; - let child_info_2 = &child_info_2; - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), - (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], 
Some(vec![i]))).collect()), - ]; - let in_memory = new_in_mem::>(); - let in_memory = in_memory.update(contents, state_version); - let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory - .full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_version, - ) - .0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - (28..65).for_each(|i| { - assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) - }); - (10..15).for_each(|i| { - assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) - }); - - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { - // Run multiple times to have a different cache conditions. - for i in 0..5 { - eprintln!("Running with cache {}, iteration {}", cache.is_some(), i); - - if let Some(cache) = &cache { - if i == 2 { - cache.reset_node_cache(); - } else if i == 3 { - cache.reset_value_cache(); - } - } - - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof().unwrap(); - - let proof_check = - create_proof_check_backend::(in_memory_root.into(), proof) - .unwrap(); - assert!(proof_check.storage(&[0]).is_err()); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - // note that it is include in root because proof close - assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); - assert_eq!(proof_check.storage(&[64]).unwrap(), None); - - let proving = 
TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); - assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - assert_eq!(proving.child_storage(child_info_1, &[25]), Ok(None)); - assert_eq!(proving.child_storage(child_info_2, &[14]), Ok(Some(vec![14]))); - assert_eq!(proving.child_storage(child_info_2, &[25]), Ok(None)); - - let proof = proving.extract_proof().unwrap(); - let proof_check = - create_proof_check_backend::(in_memory_root.into(), proof) - .unwrap(); - assert_eq!( - proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), - vec![64] - ); - assert_eq!(proof_check.child_storage(child_info_1, &[25]).unwrap(), None); - - assert_eq!( - proof_check.child_storage(child_info_2, &[14]).unwrap().unwrap(), - vec![14] - ); - assert_eq!(proof_check.child_storage(child_info_2, &[25]).unwrap(), None); - } - } - } - - /// This tests an edge case when recording a child trie access with a cache. - /// - /// The accessed value/node is in the cache, but not the nodes to get to this value. So, - /// the recorder will need to traverse the trie to access these nodes from the backend when the - /// storage proof is generated. 
- #[test] - fn child_proof_recording_with_edge_cases_works() { - child_proof_recording_with_edge_cases_works_inner(StateVersion::V0); - child_proof_recording_with_edge_cases_works_inner(StateVersion::V1); - } - fn child_proof_recording_with_edge_cases_works_inner(state_version: StateVersion) { - let child_info_1 = ChildInfo::new_default(b"sub1"); - let child_info_1 = &child_info_1; - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), - ( - Some(child_info_1.clone()), - (28..65) - .map(|i| (vec![i], Some(vec![i]))) - // Some big value to ensure we get a new node - .chain(std::iter::once((vec![65], Some(vec![65; 128])))) - .collect(), - ), - ]; - let in_memory = new_in_mem::>(); - let in_memory = in_memory.update(contents, state_version); - let child_storage_keys = vec![child_info_1.to_owned()]; - let in_memory_root = in_memory - .full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_version, - ) - .0; - - let child_1_root = - in_memory.child_storage_root(child_info_1, std::iter::empty(), state_version).0; - let trie = in_memory.as_trie_backend(); - let nodes = { - let backend = TrieBackendBuilder::wrap(trie).with_recorder(Default::default()).build(); - let value = backend.child_storage(child_info_1, &[65]).unwrap().unwrap(); - let value_hash = BlakeTwo256::hash(&value); - assert_eq!(value, vec![65; 128]); - - let proof = backend.extract_proof().unwrap(); - - let mut nodes = Vec::new(); - for node in proof.into_iter_nodes() { - let hash = BlakeTwo256::hash(&node); - // Only insert the node/value that contains the important data. 
- if hash != value_hash { - let node = sp_trie::NodeCodec::::decode(&node) - .unwrap() - .to_owned_node::>() - .unwrap(); - - if let Some(data) = node.data() { - if data == &vec![65; 128] { - nodes.push((hash, node)); - } - } - } else if hash == value_hash { - nodes.push((hash, trie_db::node::NodeOwned::Value(node.into(), hash))); - } - } - - nodes - }; - - let cache = SharedTrieCache::::new(CacheSize::Unlimited); - { - let local_cache = cache.local_cache(); - let mut trie_cache = local_cache.as_trie_db_cache(child_1_root); - - // Put the value/node into the cache. - for (hash, node) in nodes { - trie_cache.get_or_insert_node(hash, &mut || Ok(node.clone())).unwrap(); - - if let Some(data) = node.data() { - trie_cache.cache_value_for_key(&[65], (data.clone(), hash).into()); - } - } - } - - { - // Record the access - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_cache(cache.local_cache()) - .build(); - assert_eq!(proving.child_storage(child_info_1, &[65]), Ok(Some(vec![65; 128]))); - - let proof = proving.extract_proof().unwrap(); - // And check that we have a correct proof. 
- let proof_check = - create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!( - proof_check.child_storage(child_info_1, &[65]).unwrap().unwrap(), - vec![65; 128] - ); - } - } - - parameterized_test!( - storage_proof_encoded_size_estimation_works, - storage_proof_encoded_size_estimation_works_inner - ); - fn storage_proof_encoded_size_estimation_works_inner( - state_version: StateVersion, - cache: Option, - recorder: Option, - ) { - let has_cache = cache.is_some(); - let trie_backend = test_trie(state_version, cache, recorder); - let keys = &[ - &b"key"[..], - &b"value1"[..], - &b"value2"[..], - &b"doesnotexist"[..], - &b"doesnotexist2"[..], - ]; - - fn check_estimation( - backend: TrieBackend< - impl TrieBackendStorage, - BlakeTwo256, - &'_ LocalTrieCache, - >, - has_cache: bool, - ) { - let estimation = backend.essence.recorder.as_ref().unwrap().estimate_encoded_size(); - let storage_proof = backend.extract_proof().unwrap(); - let storage_proof_size = - storage_proof.into_nodes().into_iter().map(|n| n.encoded_size()).sum::(); - - if has_cache { - // Estimation is not entirely correct when we have values already cached. 
- assert!(estimation >= storage_proof_size) - } else { - assert_eq!(storage_proof_size, estimation); - } - } - - for n in 0..keys.len() { - let backend = TrieBackendBuilder::wrap(&trie_backend) - .with_recorder(Recorder::default()) - .build(); - - // Read n keys - (0..n).for_each(|i| { - backend.storage(keys[i]).unwrap(); - }); - - // Check the estimation - check_estimation(backend, has_cache); - } - } - - #[test] - fn new_data_is_added_to_the_cache() { - let shared_cache = SharedTrieCache::new(CacheSize::Unlimited); - let new_data = vec![ - (&b"new_data0"[..], Some(&b"0"[..])), - (&b"new_data1"[..], Some(&b"1"[..])), - (&b"new_data2"[..], Some(&b"2"[..])), - (&b"new_data3"[..], Some(&b"3"[..])), - (&b"new_data4"[..], Some(&b"4"[..])), - ]; - - let new_root = { - let trie = test_trie(StateVersion::V1, Some(shared_cache.local_cache()), None); - trie.storage_root(new_data.clone().into_iter(), StateVersion::V1).0 - }; - - let local_cache = shared_cache.local_cache(); - let mut cache = local_cache.as_trie_db_cache(new_root); - // All the data should be cached now - for (key, value) in new_data { - assert_eq!( - value.unwrap(), - cache.lookup_value_for_key(key).unwrap().data().flatten().unwrap().as_ref() - ); - } - } - - /// Test to ensure that recording the same `key` for different tries works as expected. - /// - /// Each trie stores a different value under the same key. The values are big enough to - /// be not inlined with `StateVersion::V1`, this is important to test the expected behavior. The - /// trie recorder is expected to differentiate key access based on the different storage roots - /// of the tries. 
- #[test] - fn recording_same_key_access_in_different_tries() { - recording_same_key_access_in_different_tries_inner(StateVersion::V0); - recording_same_key_access_in_different_tries_inner(StateVersion::V1); - } - fn recording_same_key_access_in_different_tries_inner(state_version: StateVersion) { - let key = b"test_key".to_vec(); - // Use some big values to ensure that we don't keep them inline - let top_trie_val = vec![1; 1024]; - let child_trie_1_val = vec![2; 1024]; - let child_trie_2_val = vec![3; 1024]; - - let child_info_1 = ChildInfo::new_default(b"sub1"); - let child_info_2 = ChildInfo::new_default(b"sub2"); - let child_info_1 = &child_info_1; - let child_info_2 = &child_info_2; - let contents = vec![ - (None, vec![(key.clone(), Some(top_trie_val.clone()))]), - (Some(child_info_1.clone()), vec![(key.clone(), Some(child_trie_1_val.clone()))]), - (Some(child_info_2.clone()), vec![(key.clone(), Some(child_trie_2_val.clone()))]), - ]; - let in_memory = new_in_mem::>(); - let in_memory = in_memory.update(contents, state_version); - let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory - .full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_version, - ) - .0; - assert_eq!(in_memory.storage(&key).unwrap().unwrap(), top_trie_val); - assert_eq!(in_memory.child_storage(child_info_1, &key).unwrap().unwrap(), child_trie_1_val); - assert_eq!(in_memory.child_storage(child_info_2, &key).unwrap().unwrap(), child_trie_2_val); - - for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { - // Run multiple times to have a different cache conditions. 
- for i in 0..5 { - eprintln!("Running with cache {}, iteration {}", cache.is_some(), i); - - if let Some(cache) = &cache { - if i == 2 { - cache.reset_node_cache(); - } else if i == 3 { - cache.reset_value_cache(); - } - } - - let trie = in_memory.as_trie_backend(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - - let proving = TrieBackendBuilder::wrap(&trie) - .with_recorder(Recorder::default()) - .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) - .build(); - assert_eq!(proving.storage(&key).unwrap().unwrap(), top_trie_val); - assert_eq!( - proving.child_storage(child_info_1, &key).unwrap().unwrap(), - child_trie_1_val - ); - assert_eq!( - proving.child_storage(child_info_2, &key).unwrap().unwrap(), - child_trie_2_val - ); - - let proof = proving.extract_proof().unwrap(); - - let proof_check = - create_proof_check_backend::(in_memory_root.into(), proof) - .unwrap(); - - assert_eq!(proof_check.storage(&key).unwrap().unwrap(), top_trie_val); - assert_eq!( - proof_check.child_storage(child_info_1, &key).unwrap().unwrap(), - child_trie_1_val - ); - assert_eq!( - proof_check.child_storage(child_info_2, &key).unwrap().unwrap(), - child_trie_2_val - ); - } - } - } } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index cdd1bb0bba055..7d910cc9602cc 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -18,32 +18,23 @@ //! Trie-based state machine backend essence used to read values //! from storage. 
-use crate::{ - backend::Consolidate, debug, trie_backend::AsLocalTrieCache, warn, StorageKey, StorageValue, -}; -use codec::Codec; +use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; +use codec::Encode; use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; -#[cfg(not(feature = "std"))] -use sp_std::marker::PhantomData; use sp_std::{boxed::Box, vec::Vec}; -#[cfg(feature = "std")] -use sp_trie::recorder::Recorder; use sp_trie::{ - child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, - read_child_trie_value, read_trie_value, - trie_types::{TrieDBBuilder, TrieError}, - DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBIterator, TrieDBKeyIterator, - TrieRecorder, + child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_value, + read_trie_value, + trie_types::{TrieDB, TrieError}, + DBValue, KeySpacedDB, LayoutV1 as Layout, Trie, TrieDBIterator, TrieDBKeyIterator, }; #[cfg(feature = "std")] -use std::{collections::HashMap, sync::Arc}; - -// In this module, we only use layout for read operation and empty root, -// where V1 and V0 are equivalent. -use sp_trie::LayoutV1 as Layout; +use std::collections::HashMap; +#[cfg(feature = "std")] +use std::sync::Arc; #[cfg(not(feature = "std"))] macro_rules! format { @@ -77,21 +68,18 @@ impl Cache { } /// Patricia trie-based pairs storage essence. 
-pub struct TrieBackendEssence, H: Hasher, C> { +pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, - #[cfg(feature = "std")] - pub(crate) trie_node_cache: Option, - #[cfg(feature = "std")] - pub(crate) recorder: Option>, - #[cfg(not(feature = "std"))] - _phantom: PhantomData, } -impl, H: Hasher, C> TrieBackendEssence { +impl, H: Hasher> TrieBackendEssence +where + H::Out: Encode, +{ /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -100,30 +88,6 @@ impl, H: Hasher, C> TrieBackendEssence { empty: H::hash(&[0u8]), #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), - #[cfg(feature = "std")] - trie_node_cache: None, - #[cfg(feature = "std")] - recorder: None, - #[cfg(not(feature = "std"))] - _phantom: PhantomData, - } - } - - /// Create new trie-based backend. - #[cfg(feature = "std")] - pub fn new_with_cache_and_recorder( - storage: S, - root: H::Out, - cache: Option, - recorder: Option>, - ) -> Self { - TrieBackendEssence { - storage, - root, - empty: H::hash(&[0u8]), - cache: Arc::new(RwLock::new(Cache::new())), - trie_node_cache: cache, - recorder, } } @@ -132,11 +96,6 @@ impl, H: Hasher, C> TrieBackendEssence { &self.storage } - /// Get backend storage mutable reference. - pub fn backend_storage_mut(&mut self) -> &mut S { - &mut self.storage - } - /// Get trie root. pub fn root(&self) -> &H::Out { &self.root @@ -161,107 +120,7 @@ impl, H: Hasher, C> TrieBackendEssence { pub fn into_storage(self) -> S { self.storage } -} -impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEssence { - /// Call the given closure passing it the recorder and the cache. - /// - /// If the given `storage_root` is `None`, `self.root` will be used. 
- #[cfg(feature = "std")] - fn with_recorder_and_cache( - &self, - storage_root: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> R, - ) -> R { - let storage_root = storage_root.unwrap_or_else(|| self.root); - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); - let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), - None => None, - }; - - let mut cache = self - .trie_node_cache - .as_ref() - .map(|c| c.as_local_trie_cache().as_trie_db_cache(storage_root)); - let cache = cache.as_mut().map(|c| c as _); - - callback(recorder, cache) - } - - #[cfg(not(feature = "std"))] - fn with_recorder_and_cache( - &self, - _: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> R, - ) -> R { - callback(None, None) - } - - /// Call the given closure passing it the recorder and the cache. - /// - /// This function must only be used when the operation in `callback` is - /// calculating a `storage_root`. It is expected that `callback` returns - /// the new storage root. This is required to register the changes in the cache - /// for the correct storage root. The given `storage_root` corresponds to the root of the "old" - /// trie. If the value is not given, `self.root` is used. 
- #[cfg(feature = "std")] - fn with_recorder_and_cache_for_storage_root( - &self, - storage_root: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { - let storage_root = storage_root.unwrap_or_else(|| self.root); - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); - let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), - None => None, - }; - - let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { - let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); - - let (new_root, r) = callback(recorder, Some(&mut cache)); - - if let Some(new_root) = new_root { - cache.merge_into(local_cache.as_local_trie_cache(), new_root); - } - - r - } else { - callback(recorder, None).1 - }; - - result - } - - #[cfg(not(feature = "std"))] - fn with_recorder_and_cache_for_storage_root( - &self, - _: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { - callback(None, None).1 - } -} - -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> - TrieBackendEssence -where - H::Out: Codec + Ord, -{ /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. 
pub fn next_storage_key(&self, key: &[u8]) -> Result> { @@ -325,82 +184,39 @@ where dyn_eph = self; } - self.with_recorder_and_cache(Some(*root), |recorder, cache| { - let trie = TrieDBBuilder::::new(dyn_eph, root) - .with_optional_recorder(recorder) - .with_optional_cache(cache) - .build(); - - let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; + let trie = + TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; - // The key just after the one given in input, basically `key++0`. - // Note: We are sure this is the next key if: - // * size of key has no limit (i.e. we can always add 0 to the path), - // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). - let mut potential_next_key = Vec::with_capacity(key.len() + 1); - potential_next_key.extend_from_slice(key); - potential_next_key.push(0); + // The key just after the one given in input, basically `key++0`. + // Note: We are sure this is the next key if: + // * size of key has no limit (i.e. we can always add 0 to the path), + // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). 
+ let mut potential_next_key = Vec::with_capacity(key.len() + 1); + potential_next_key.extend_from_slice(key); + potential_next_key.push(0); - iter.seek(&potential_next_key) - .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; + iter.seek(&potential_next_key) + .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; - let next_element = iter.next(); - - let next_key = if let Some(next_element) = next_element { - let next_key = - next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; - Some(next_key) - } else { - None - }; + let next_element = iter.next(); - Ok(next_key) - }) - } - - /// Returns the hash value - pub fn storage_hash(&self, key: &[u8]) -> Result> { - let map_e = |e| format!("Trie lookup error: {}", e); + let next_key = if let Some(next_element) = next_element { + let next_key = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; + Some(next_key) + } else { + None + }; - self.with_recorder_and_cache(None, |recorder, cache| { - TrieDBBuilder::new(self, &self.root) - .with_optional_cache(cache) - .with_optional_recorder(recorder) - .build() - .get_hash(key) - .map_err(map_e) - }) + Ok(next_key) } /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); - self.with_recorder_and_cache(None, |recorder, cache| { - read_trie_value::, _>(self, &self.root, key, recorder, cache).map_err(map_e) - }) - } - - /// Returns the hash value - pub fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Result> { - let child_root = match self.child_root(child_info)? 
{ - Some(root) => root, - None => return Ok(None), - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_hash::, _>( - child_info.keyspace(), - self, - &child_root, - key, - recorder, - cache, - ) - .map_err(map_e) - }) + read_trie_value::, _>(self, &self.root, key).map_err(map_e) } /// Get the value of child storage at given key. @@ -409,24 +225,15 @@ where child_info: &ChildInfo, key: &[u8], ) -> Result> { - let child_root = match self.child_root(child_info)? { + let root = match self.child_root(child_info)? { Some(root) => root, None => return Ok(None), }; let map_e = |e| format!("Trie lookup error: {}", e); - self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_value::, _>( - child_info.keyspace(), - self, - &child_root, - key, - recorder, - cache, - ) + read_child_trie_value::, _>(child_info.keyspace(), self, &root, key) .map_err(map_e) - }) } /// Retrieve all entries keys of storage and call `f` for each of those keys. 
@@ -531,33 +338,28 @@ where maybe_start_at: Option<&[u8]>, ) { let mut iter = move |db| -> sp_std::result::Result<(), Box>> { - self.with_recorder_and_cache(Some(*root), |recorder, cache| { - let trie = TrieDBBuilder::::new(db, root) - .with_optional_recorder(recorder) - .with_optional_cache(cache) - .build(); - let prefix = maybe_prefix.unwrap_or(&[]); - let iter = match maybe_start_at { - Some(start_at) => - TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), - None => TrieDBKeyIterator::new_prefixed(&trie, prefix), - }?; - - for x in iter { - let key = x?; - - debug_assert!(maybe_prefix - .as_ref() - .map(|prefix| key.starts_with(prefix)) - .unwrap_or(true)); - - if !f(&key) { - break - } + let trie = TrieDB::::new(db, root)?; + let prefix = maybe_prefix.unwrap_or(&[]); + let iter = match maybe_start_at { + Some(start_at) => + TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), + None => TrieDBKeyIterator::new_prefixed(&trie, prefix), + }?; + + for x in iter { + let key = x?; + + debug_assert!(maybe_prefix + .as_ref() + .map(|prefix| key.starts_with(prefix)) + .unwrap_or(true)); + + if !f(&key) { + break } + } - Ok(()) - }) + Ok(()) }; let result = if let Some(child_info) = child_info { @@ -581,30 +383,25 @@ where allow_missing_nodes: bool, ) -> Result { let mut iter = move |db| -> sp_std::result::Result>> { - self.with_recorder_and_cache(Some(*root), |recorder, cache| { - let trie = TrieDBBuilder::::new(db, root) - .with_optional_recorder(recorder) - .with_optional_cache(cache) - .build(); - - let prefix = prefix.unwrap_or(&[]); - let iterator = if let Some(start_at) = start_at { - TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? - } else { - TrieDBIterator::new_prefixed(&trie, prefix)? 
- }; - for x in iterator { - let (key, value) = x?; - - debug_assert!(key.starts_with(prefix)); - - if !f(key, value) { - return Ok(false) - } + let trie = TrieDB::::new(db, root)?; + + let prefix = prefix.unwrap_or(&[]); + let iterator = if let Some(start_at) = start_at { + TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? + } else { + TrieDBIterator::new_prefixed(&trie, prefix)? + }; + for x in iterator { + let (key, value) = x?; + + debug_assert!(key.starts_with(prefix)); + + if !f(key, value) { + return Ok(false) } + } - Ok(true) - }) + Ok(true) }; let result = if let Some(child_info) = child_info { @@ -639,20 +436,14 @@ where /// Returns all `(key, value)` pairs in the trie. pub fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let collect_all = || -> sp_std::result::Result<_, Box>> { - self.with_recorder_and_cache(None, |recorder, cache| { - let trie = TrieDBBuilder::::new(self, self.root()) - .with_optional_cache(cache) - .with_optional_recorder(recorder) - .build(); - - let mut v = Vec::new(); - for x in trie.iter()? { - let (key, value) = x?; - v.push((key.to_vec(), value.to_vec())); - } + let trie = TrieDB::::new(self, &self.root)?; + let mut v = Vec::new(); + for x in trie.iter()? 
{ + let (key, value) = x?; + v.push((key.to_vec(), value.to_vec())); + } - Ok(v) - }) + Ok(v) }; match collect_all() { @@ -676,28 +467,27 @@ where &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, S::Overlay) { + ) -> (H::Out, S::Overlay) + where + H::Out: Ord, + { let mut write_overlay = S::Overlay::default(); + let mut root = self.root; - let root = self.with_recorder_and_cache_for_storage_root(None, |recorder, cache| { + { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); let res = match state_version { - StateVersion::V0 => delta_trie_root::, _, _, _, _, _>( - &mut eph, self.root, delta, recorder, cache, - ), - StateVersion::V1 => delta_trie_root::, _, _, _, _, _>( - &mut eph, self.root, delta, recorder, cache, - ), + StateVersion::V0 => + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), + StateVersion::V1 => + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), }; match res { - Ok(ret) => (Some(ret), ret), - Err(e) => { - warn!(target: "trie", "Failed to write to trie: {}", e); - (None, self.root) - }, + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } - }); + } (root, write_overlay) } @@ -709,12 +499,15 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, S::Overlay) { + ) -> (H::Out, bool, S::Overlay) + where + H::Out: Ord, + { let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; let mut write_overlay = S::Overlay::default(); - let child_root = match self.child_root(child_info) { + let mut root = match self.child_root(child_info) { Ok(Some(hash)) => hash, Ok(None) => default_root, Err(e) => { @@ -723,40 +516,32 @@ where }, }; - let new_child_root = - self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { - let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); - match match state_version { - 
StateVersion::V0 => - child_delta_trie_root::, _, _, _, _, _, _>( - child_info.keyspace(), - &mut eph, - child_root, - delta, - recorder, - cache, - ), - StateVersion::V1 => - child_delta_trie_root::, _, _, _, _, _, _>( - child_info.keyspace(), - &mut eph, - child_root, - delta, - recorder, - cache, - ), - } { - Ok(ret) => (Some(ret), ret), - Err(e) => { - warn!(target: "trie", "Failed to write to trie: {}", e); - (None, child_root) - }, - } - }); + { + let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); + match match state_version { + StateVersion::V0 => + child_delta_trie_root::, _, _, _, _, _, _>( + child_info.keyspace(), + &mut eph, + root, + delta, + ), + StateVersion::V1 => + child_delta_trie_root::, _, _, _, _, _, _>( + child_info.keyspace(), + &mut eph, + root, + delta, + ), + } { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } + } - let is_default = new_child_root == default_root; + let is_default = root == default_root; - (new_child_root, is_default, write_overlay) + (root, is_default, write_overlay) } } @@ -830,14 +615,6 @@ pub trait TrieBackendStorage: Send + Sync { fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } -impl, H: Hasher> TrieBackendStorage for &T { - type Overlay = T::Overlay; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - (*self).get(key, prefix) - } -} - // This implementation is used by normal storage trie clients. 
#[cfg(feature = "std")] impl TrieBackendStorage for Arc> { @@ -860,9 +637,7 @@ where } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> AsHashDB - for TrieBackendEssence -{ +impl, H: Hasher> AsHashDB for TrieBackendEssence { fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } @@ -871,9 +646,7 @@ impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> HashDB - for TrieBackendEssence -{ +impl, H: Hasher> HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -904,9 +677,7 @@ impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> } } -impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> - HashDBRef for TrieBackendEssence -{ +impl, H: Hasher> HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } @@ -921,8 +692,7 @@ mod test { use super::*; use sp_core::{Blake2Hasher, H256}; use sp_trie::{ - cache::LocalTrieCache, trie_types::TrieDBMutBuilderV1 as TrieDBMutBuilder, KeySpacedDBMut, - PrefixedMemoryDB, TrieMut, + trie_types::TrieDBMutV1 as TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut, }; #[test] @@ -936,7 +706,7 @@ mod test { let mut mdb = PrefixedMemoryDB::::default(); { - let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); + let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); trie.insert(b"3", &[1]).expect("insert failed"); trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); @@ -945,18 +715,18 @@ mod test { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); // reuse of root_1 implicitly assert child trie root is same // as top trie (contents must remain the same). 
- let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); + let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); trie.insert(b"3", &[1]).expect("insert failed"); trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } { - let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_2).build(); + let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_1); + let essence_1 = TrieBackendEssence::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -964,8 +734,8 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let mdb = essence_1.backend_storage().clone(); - let essence_2 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_2); + let mdb = essence_1.into_storage(); + let essence_2 = TrieBackendEssence::new(mdb, root_2); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index d04a88d129d34..b37a4eb4b331d 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-serde = { version = "0.4.0", optional = true } +impl-serde = { version = "0.3.1", optional = true } ref-cast = "1.0.0" serde = { version = "1.0.136", features = ["derive"], optional = true } sp-debug-derive = { version = "4.0.0", default-features = false, path = 
"../debug-derive" } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 79c1012196bde..0948cf431158d 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -19,8 +19,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -use core::fmt::Display; - #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; use sp_debug_derive::RuntimeDebug; @@ -49,9 +47,8 @@ impl AsRef<[u8]> for StorageKey { } /// Storage key with read/write tracking information. -#[derive( - PartialEq, Eq, Ord, PartialOrd, sp_std::hash::Hash, RuntimeDebug, Clone, Encode, Decode, -)] +#[derive(PartialEq, Eq, RuntimeDebug, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] pub struct TrackedStorageKey { pub key: Vec, pub reads: u32, @@ -414,15 +411,6 @@ pub enum StateVersion { V1 = 1, } -impl Display for StateVersion { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - StateVersion::V0 => f.write_str("0"), - StateVersion::V1 => f.write_str("1"), - } - } -} - impl Default for StateVersion { fn default() -> Self { StateVersion::V1 diff --git a/primitives/tasks/Cargo.toml b/primitives/tasks/Cargo.toml new file mode 100644 index 0000000000000..c37a8a66f94df --- /dev/null +++ b/primitives/tasks/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "sp-tasks" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Runtime asynchronous, pure computational tasks" +documentation = "https://docs.rs/sp-tasks" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +log = { version = "0.4.17", optional = true } +sp-core = { version = "6.0.0", default-features = false, path = "../core" } +sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } +sp-io = { version = 
"6.0.0", default-features = false, path = "../io" } +sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } +sp-std = { version = "4.0.0", default-features = false, path = "../std" } + +[dev-dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } + +[features] +default = ["std"] +std = [ + "log", + "sp-core/std", + "sp-externalities", + "sp-io/std", + "sp-runtime-interface/std", + "sp-std/std", +] diff --git a/primitives/tasks/README.md b/primitives/tasks/README.md new file mode 100644 index 0000000000000..1235e1bd933d4 --- /dev/null +++ b/primitives/tasks/README.md @@ -0,0 +1,3 @@ +Runtime asynchronous, pure computational tasks. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs new file mode 100644 index 0000000000000..008955a714b21 --- /dev/null +++ b/primitives/tasks/src/async_externalities.rs @@ -0,0 +1,212 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Async externalities. 
+ +use sp_core::{ + storage::{ChildInfo, StateVersion, TrackedStorageKey}, + traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, +}; +use sp_externalities::{Extensions, ExternalitiesExt as _, MultiRemovalResults}; +use std::any::{Any, TypeId}; + +/// Simple state-less externalities for use in async context. +/// +/// Will panic if anything is accessing the storage. +#[derive(Debug)] +pub struct AsyncExternalities { + extensions: Extensions, +} + +/// New Async externalities. +pub fn new_async_externalities( + scheduler: Box, +) -> Result { + let mut res = AsyncExternalities { extensions: Default::default() }; + let mut ext = &mut res as &mut dyn Externalities; + ext.register_extension::(TaskExecutorExt(scheduler.clone())) + .map_err(|_| "Failed to register task executor extension.")?; + + Ok(res) +} + +impl AsyncExternalities { + /// Extend async externalities with the ability to spawn wasm instances. + pub fn with_runtime_spawn( + mut self, + runtime_ext: Box, + ) -> Result { + let mut ext = &mut self as &mut dyn Externalities; + ext.register_extension::(RuntimeSpawnExt(runtime_ext)) + .map_err(|_| "Failed to register task executor extension.")?; + + Ok(self) + } +} + +type StorageKey = Vec; + +type StorageValue = Vec; + +impl Externalities for AsyncExternalities { + fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) { + panic!("`set_offchain_storage`: should not be used in async externalities!") + } + + fn storage(&self, _key: &[u8]) -> Option { + panic!("`storage`: should not be used in async externalities!") + } + + fn storage_hash(&self, _key: &[u8]) -> Option> { + panic!("`storage_hash`: should not be used in async externalities!") + } + + fn child_storage(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { + panic!("`child_storage`: should not be used in async externalities!") + } + + fn child_storage_hash(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option> { + panic!("`child_storage_hash`: should 
not be used in async externalities!") + } + + fn next_storage_key(&self, _key: &[u8]) -> Option { + panic!("`next_storage_key`: should not be used in async externalities!") + } + + fn next_child_storage_key(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { + panic!("`next_child_storage_key`: should not be used in async externalities!") + } + + fn place_storage(&mut self, _key: StorageKey, _maybe_value: Option) { + panic!("`place_storage`: should not be used in async externalities!") + } + + fn place_child_storage( + &mut self, + _child_info: &ChildInfo, + _key: StorageKey, + _value: Option, + ) { + panic!("`place_child_storage`: should not be used in async externalities!") + } + + fn kill_child_storage( + &mut self, + _child_info: &ChildInfo, + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + panic!("`kill_child_storage`: should not be used in async externalities!") + } + + fn clear_prefix( + &mut self, + _prefix: &[u8], + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + panic!("`clear_prefix`: should not be used in async externalities!") + } + + fn clear_child_prefix( + &mut self, + _child_info: &ChildInfo, + _prefix: &[u8], + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + panic!("`clear_child_prefix`: should not be used in async externalities!") + } + + fn storage_append(&mut self, _key: Vec, _value: Vec) { + panic!("`storage_append`: should not be used in async externalities!") + } + + fn storage_root(&mut self, _state_version: StateVersion) -> Vec { + panic!("`storage_root`: should not be used in async externalities!") + } + + fn child_storage_root( + &mut self, + _child_info: &ChildInfo, + _state_version: StateVersion, + ) -> Vec { + panic!("`child_storage_root`: should not be used in async externalities!") + } + + fn storage_start_transaction(&mut self) { + unimplemented!("Transactions are not supported by AsyncExternalities"); + } + + fn 
storage_rollback_transaction(&mut self) -> Result<(), ()> { + unimplemented!("Transactions are not supported by AsyncExternalities"); + } + + fn storage_commit_transaction(&mut self) -> Result<(), ()> { + unimplemented!("Transactions are not supported by AsyncExternalities"); + } + + fn wipe(&mut self) {} + + fn commit(&mut self) {} + + fn read_write_count(&self) -> (u32, u32, u32, u32) { + unimplemented!("read_write_count is not supported in AsyncExternalities") + } + + fn reset_read_write_count(&mut self) { + unimplemented!("reset_read_write_count is not supported in AsyncExternalities") + } + + fn get_whitelist(&self) -> Vec { + unimplemented!("get_whitelist is not supported in AsyncExternalities") + } + + fn set_whitelist(&mut self, _: Vec) { + unimplemented!("set_whitelist is not supported in AsyncExternalities") + } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + unimplemented!("get_read_and_written_keys is not supported in AsyncExternalities") + } +} + +impl sp_externalities::ExtensionStore for AsyncExternalities { + fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { + self.extensions.get_mut(type_id) + } + + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), sp_externalities::Error> { + self.extensions.register_with_type_id(type_id, extension) + } + + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + if self.extensions.deregister(type_id) { + Ok(()) + } else { + Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id)) + } + } +} diff --git a/primitives/tasks/src/lib.rs b/primitives/tasks/src/lib.rs new file mode 100644 index 0000000000000..3711fa71a2fab --- /dev/null +++ b/primitives/tasks/src/lib.rs @@ -0,0 +1,257 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime tasks. +//! +//! Contains runtime-usable functions for spawning parallel purely computational tasks. +//! +//! NOTE: This is experimental API. +//! NOTE: When using in actual runtime, make sure you don't produce unbounded parallelism. +//! So this is bad example to use it: +//! ```rust +//! fn my_parallel_computator(data: Vec) -> Vec { +//! unimplemented!() +//! } +//! fn test(dynamic_variable: i32) { +//! for _ in 0..dynamic_variable { sp_tasks::spawn(my_parallel_computator, vec![]); } +//! } +//! ``` +//! +//! While this is a good example: +//! ```rust +//! use codec::Encode; +//! static STATIC_VARIABLE: i32 = 4; +//! +//! fn my_parallel_computator(data: Vec) -> Vec { +//! unimplemented!() +//! } +//! +//! fn test(computation_payload: Vec) { +//! let parallel_tasks = (0..STATIC_VARIABLE).map(|idx| +//! sp_tasks::spawn(my_parallel_computator, computation_payload.chunks(10).nth(idx as _).encode()) +//! ); +//! } +//! ``` +//! +//! When allowing unbounded parallelism, malicious transactions can exploit it and partition +//! network consensus based on how much resources nodes have. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "std")] +mod async_externalities; + +#[cfg(feature = "std")] +pub use async_externalities::{new_async_externalities, AsyncExternalities}; + +#[cfg(feature = "std")] +mod inner { + use sp_core::traits::TaskExecutorExt; + use sp_externalities::ExternalitiesExt as _; + use std::{panic::AssertUnwindSafe, sync::mpsc}; + + /// Task handle (wasm). + /// + /// This can be `join`-ed to get (blocking) the result of + /// the spawned task execution. + #[must_use] + pub struct DataJoinHandle { + receiver: mpsc::Receiver>, + } + + impl DataJoinHandle { + /// Join handle returned by `spawn` function + pub fn join(self) -> Vec { + self.receiver + .recv() + .expect("Spawned runtime task terminated before sending result.") + } + } + + /// Spawn new runtime task (native). + pub fn spawn(entry_point: fn(Vec) -> Vec, data: Vec) -> DataJoinHandle { + let scheduler = sp_externalities::with_externalities(|mut ext| { + ext.extension::() + .expect("No task executor associated with the current context!") + .clone() + }) + .expect("Spawn called outside of externalities context!"); + + let (sender, receiver) = mpsc::channel(); + let extra_scheduler = scheduler.clone(); + scheduler.spawn( + "parallel-runtime-spawn", + Some("substrate-runtime"), + Box::pin(async move { + let result = match crate::new_async_externalities(extra_scheduler) { + Ok(mut ext) => { + let mut ext = AssertUnwindSafe(&mut ext); + match std::panic::catch_unwind(move || { + sp_externalities::set_and_run_with_externalities( + &mut **ext, + move || entry_point(data), + ) + }) { + Ok(result) => result, + Err(panic) => { + log::error!( + target: "runtime", + "Spawned task panicked: {:?}", + panic, + ); + + // This will drop sender without sending anything. 
+ return + }, + } + }, + Err(e) => { + log::error!( + target: "runtime", + "Unable to run async task: {}", + e, + ); + + return + }, + }; + + let _ = sender.send(result); + }), + ); + + DataJoinHandle { receiver } + } +} + +#[cfg(not(feature = "std"))] +mod inner { + use core::mem; + use sp_std::prelude::*; + + /// Dispatch wrapper for wasm blob. + /// + /// Serves as trampoline to call any rust function with (Vec) -> Vec compiled + /// into the runtime. + /// + /// Function item should be provided with `func_ref`. Argument for the call + /// will be generated from bytes at `payload_ptr` with `payload_len`. + /// + /// NOTE: Since this dynamic dispatch function and the invoked function are compiled with + /// the same compiler, there should be no problem with ABI incompatibility. + extern "C" fn dispatch_wrapper( + func_ref: *const u8, + payload_ptr: *mut u8, + payload_len: u32, + ) -> u64 { + let payload_len = payload_len as usize; + let output = unsafe { + let payload = Vec::from_raw_parts(payload_ptr, payload_len, payload_len); + let ptr: fn(Vec) -> Vec = mem::transmute(func_ref); + (ptr)(payload) + }; + sp_runtime_interface::pack_ptr_and_len(output.as_ptr() as usize as _, output.len() as _) + } + + /// Spawn new runtime task (wasm). + pub fn spawn(entry_point: fn(Vec) -> Vec, payload: Vec) -> DataJoinHandle { + let func_ptr: usize = unsafe { mem::transmute(entry_point) }; + + let handle = + sp_io::runtime_tasks::spawn(dispatch_wrapper as usize as _, func_ptr as u32, payload); + DataJoinHandle { handle } + } + + /// Task handle (wasm). + /// + /// This can be `join`-ed to get (blocking) the result of + /// the spawned task execution. 
+ #[must_use] + pub struct DataJoinHandle { + handle: u64, + } + + impl DataJoinHandle { + /// Join handle returned by `spawn` function + pub fn join(self) -> Vec { + sp_io::runtime_tasks::join(self.handle) + } + } +} + +pub use inner::{spawn, DataJoinHandle}; + +#[cfg(test)] +mod tests { + + use super::*; + + fn async_runner(mut data: Vec) -> Vec { + data.sort(); + data + } + + fn async_panicker(_data: Vec) -> Vec { + panic!("panic in async panicker!") + } + + #[test] + fn basic() { + sp_io::TestExternalities::default().execute_with(|| { + let a1 = spawn(async_runner, vec![5, 2, 1]).join(); + assert_eq!(a1, vec![1, 2, 5]); + }) + } + + #[test] + fn panicking() { + let res = sp_io::TestExternalities::default().execute_with_safe(|| { + spawn(async_panicker, vec![5, 2, 1]).join(); + }); + + assert!(res.unwrap_err().contains("Closure panicked")); + } + + #[test] + fn many_joins() { + sp_io::TestExternalities::default() + .execute_with_safe(|| { + // converges to 1 only after 1000+ steps + let mut running_val = 9780657630u64; + let mut data = vec![]; + let handles = (0..1024) + .map(|_| { + running_val = if running_val % 2 == 0 { + running_val / 2 + } else { + 3 * running_val + 1 + }; + data.push(running_val as u8); + (spawn(async_runner, data.clone()), data.clone()) + }) + .collect::>(); + + for (handle, mut data) in handles { + let result = handle.join(); + data.sort(); + + assert_eq!(result, data); + } + }) + .expect("Failed to run with externalities"); + } +} diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 28fa6e6213daf..2a20addf66b2b 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } 
+parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 2e8f281cd7c7b..42701f5ad3bf1 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.50", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures-timer = { version = "3.0.2", optional = true } log = { version = "0.4.17", optional = true } diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index d88b1839babe6..b98a87c37f69d 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -56,17 +56,6 @@ impl Timestamp { pub fn checked_sub(self, other: Self) -> Option { self.0.checked_sub(other.0).map(Self) } - - /// The current timestamp using the system time. - #[cfg(feature = "std")] - pub fn current() -> Self { - use std::time::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH) - .expect("Current time is always after unix epoch; qed") - .into() - } } impl sp_std::ops::Deref for Timestamp { @@ -176,6 +165,18 @@ impl TimestampInherentData for InherentData { } } +/// The current timestamp using the system time. +/// +/// This timestamp is the time since the UNIX epoch. 
+#[cfg(feature = "std")] +fn current_timestamp() -> std::time::Duration { + use std::time::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .expect("Current time is always after unix epoch; qed") +} + /// Provide duration since unix epoch in millisecond for timestamp inherent. #[cfg(feature = "std")] pub struct InherentDataProvider { @@ -189,7 +190,7 @@ impl InherentDataProvider { pub fn from_system_time() -> Self { Self { max_drift: std::time::Duration::from_secs(60).into(), - timestamp: Timestamp::current(), + timestamp: current_timestamp().into(), } } diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index e916462675435..a5a80528c6734 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.57", optional = true } +async-trait = { version = "0.1.50", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index fde84c1c58b1a..ee0c8e4ec8e29 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -200,8 +200,7 @@ pub mod registration { let mut transaction_root = sp_trie::empty_trie_root::(); { let mut trie = - sp_trie::TrieDBMutBuilder::::new(&mut db, &mut transaction_root) - .build(); + sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); for (index, chunk) in chunks.enumerate() { let index = encode_index(index as u32); diff --git 
a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 211e071c073af..a3754461f890e 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -18,47 +18,33 @@ name = "bench" harness = false [dependencies] -ahash = { version = "0.7.6", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -hashbrown = { version = "0.12.3", optional = true } hash-db = { version = "0.15.2", default-features = false } -lazy_static = { version = "1.4.0", optional = true } -lru = { version = "0.8.1", optional = true } -memory-db = { version = "0.30.0", default-features = false } -nohash-hasher = { version = "0.2.0", optional = true } -parking_lot = { version = "0.12.1", optional = true } +memory-db = { version = "0.29.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.30", optional = true } -tracing = { version = "0.1.29", optional = true } -trie-db = { version = "0.24.0", default-features = false } +trie-db = { version = "0.23.1", default-features = false } trie-root = { version = "0.17.0", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] -array-bytes = "4.1" criterion = "0.3.3" -trie-bench = "0.32.0" +hex-literal = "0.3.4" +trie-bench = "0.30.0" trie-standardmap = "0.15.2" sp-runtime = { version = "6.0.0", path = "../runtime" } [features] default = ["std"] std = [ - "ahash", "codec/std", - "hashbrown", "hash-db/std", - "lazy_static", - "lru", "memory-db/std", - "nohash-hasher", - "parking_lot", "scale-info/std", "sp-core/std", "sp-std/std", "thiserror", - "tracing", "trie-db/std", "trie-root/std", ] diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs deleted file mode 100644 index 85539cf626857..0000000000000 --- 
a/primitives/trie/src/cache/mod.rs +++ /dev/null @@ -1,692 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Trie Cache -//! -//! Provides an implementation of the [`TrieCache`](trie_db::TrieCache) trait. -//! The implementation is split into three types [`SharedTrieCache`], [`LocalTrieCache`] and -//! [`TrieCache`]. The [`SharedTrieCache`] is the instance that should be kept around for the entire -//! lifetime of the node. It will store all cached trie nodes and values on a global level. Then -//! there is the [`LocalTrieCache`] that should be kept around per state instance requested from the -//! backend. As there are very likely multiple accesses to the state per instance, this -//! [`LocalTrieCache`] is used to cache the nodes and the values before they are merged back to the -//! shared instance. Last but not least there is the [`TrieCache`] that is being used per access to -//! the state. It will use the [`SharedTrieCache`] and the [`LocalTrieCache`] to fulfill cache -//! requests. If both of them don't provide the requested data it will be inserted into the -//! [`LocalTrieCache`] and then later into the [`SharedTrieCache`]. -//! -//! The [`SharedTrieCache`] is bound to some maximum number of bytes. It is ensured that it never -//! runs above this limit. 
However as long as data is cached inside a [`LocalTrieCache`] it isn't -//! taken into account when limiting the [`SharedTrieCache`]. This means that for the lifetime of a -//! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum. - -use crate::{Error, NodeCodec}; -use hash_db::Hasher; -use hashbrown::HashSet; -use nohash_hasher::BuildNoHashHasher; -use parking_lot::{Mutex, MutexGuard, RwLockReadGuard}; -use shared_cache::{SharedValueCache, ValueCacheKey}; -use std::{ - collections::{hash_map::Entry as MapEntry, HashMap}, - sync::Arc, -}; -use trie_db::{node::NodeOwned, CachedValue}; - -mod shared_cache; - -pub use shared_cache::SharedTrieCache; - -use self::shared_cache::{SharedTrieCacheInner, ValueCacheKeyHash}; - -const LOG_TARGET: &str = "trie-cache"; - -/// The size of the cache. -#[derive(Debug, Clone, Copy)] -pub enum CacheSize { - /// Do not limit the cache size. - Unlimited, - /// Let the cache in maximum use the given amount of bytes. - Maximum(usize), -} - -impl CacheSize { - /// Returns `true` if the `current_size` exceeds the allowed size. - fn exceeds(&self, current_size: usize) -> bool { - match self { - Self::Unlimited => false, - Self::Maximum(max) => *max < current_size, - } - } -} - -/// The local trie cache. -/// -/// This cache should be used per state instance created by the backend. One state instance is -/// referring to the state of one block. It will cache all the accesses that are done to the state -/// which could not be fullfilled by the [`SharedTrieCache`]. These locally cached items are merged -/// back to the shared trie cache when this instance is dropped. -/// -/// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes. -/// So, it is important that these methods are not called multiple times, because they otherwise -/// deadlock. -pub struct LocalTrieCache { - /// The shared trie cache that created this instance. 
- shared: SharedTrieCache, - /// The local cache for the trie nodes. - node_cache: Mutex>>, - /// Keeps track of all the trie nodes accessed in the shared cache. - /// - /// This will be used to ensure that these nodes are brought to the front of the lru when this - /// local instance is merged back to the shared cache. - shared_node_cache_access: Mutex>, - /// The local cache for the values. - value_cache: Mutex< - HashMap< - ValueCacheKey<'static, H::Out>, - CachedValue, - BuildNoHashHasher>, - >, - >, - /// Keeps track of all values accessed in the shared cache. - /// - /// This will be used to ensure that these nodes are brought to the front of the lru when this - /// local instance is merged back to the shared cache. This can actually lead to collision when - /// two [`ValueCacheKey`]s with different storage roots and keys map to the same hash. However, - /// as we only use this set to update the lru position it is fine, even if we bring the wrong - /// value to the top. The important part is that we always get the correct value from the value - /// cache for a given key. - shared_value_cache_access: - Mutex>>, -} - -impl LocalTrieCache { - /// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache. - /// - /// The given `storage_root` needs to be the storage root of the trie this cache is used for. - pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> { - let shared_inner = self.shared.read_lock_inner(); - - let value_cache = ValueCache::ForStorageRoot { - storage_root, - local_value_cache: self.value_cache.lock(), - shared_value_cache_access: self.shared_value_cache_access.lock(), - }; - - TrieCache { - shared_inner, - local_cache: self.node_cache.lock(), - value_cache, - shared_node_cache_access: self.shared_node_cache_access.lock(), - } - } - - /// Return self as [`TrieDBMut`](trie_db::TrieDBMut) compatible cache. 
- /// - /// After finishing all operations with [`TrieDBMut`](trie_db::TrieDBMut) and having obtained - /// the new storage root, [`TrieCache::merge_into`] should be called to update this local - /// cache instance. If the function is not called, cached data is just thrown away and not - /// propagated to the shared cache. So, accessing these new items will be slower, but nothing - /// would break because of this. - pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> { - TrieCache { - shared_inner: self.shared.read_lock_inner(), - local_cache: self.node_cache.lock(), - value_cache: ValueCache::Fresh(Default::default()), - shared_node_cache_access: self.shared_node_cache_access.lock(), - } - } -} - -impl Drop for LocalTrieCache { - fn drop(&mut self) { - let mut shared_inner = self.shared.write_lock_inner(); - - shared_inner - .node_cache_mut() - .update(self.node_cache.lock().drain(), self.shared_node_cache_access.lock().drain()); - - shared_inner - .value_cache_mut() - .update(self.value_cache.lock().drain(), self.shared_value_cache_access.lock().drain()); - } -} - -/// The abstraction of the value cache for the [`TrieCache`]. -enum ValueCache<'a, H> { - /// The value cache is fresh, aka not yet associated to any storage root. - /// This is used for example when a new trie is being build, to cache new values. - Fresh(HashMap, CachedValue>), - /// The value cache is already bound to a specific storage root. - ForStorageRoot { - shared_value_cache_access: MutexGuard< - 'a, - HashSet>, - >, - local_value_cache: MutexGuard< - 'a, - HashMap< - ValueCacheKey<'static, H>, - CachedValue, - nohash_hasher::BuildNoHashHasher>, - >, - >, - storage_root: H, - }, -} - -impl + std::hash::Hash + Eq + Clone + Copy> ValueCache<'_, H> { - /// Get the value for the given `key`. 
- fn get<'a>( - &'a mut self, - key: &[u8], - shared_value_cache: &'a SharedValueCache, - ) -> Option<&CachedValue> { - match self { - Self::Fresh(map) => map.get(key), - Self::ForStorageRoot { local_value_cache, shared_value_cache_access, storage_root } => { - let key = ValueCacheKey::new_ref(key, *storage_root); - - // We first need to look up in the local cache and then the shared cache. - // It can happen that some value is cached in the shared cache, but the - // weak reference of the data can not be upgraded anymore. This for example - // happens when the node is dropped that contains the strong reference to the data. - // - // So, the logic of the trie would lookup the data and the node and store both - // in our local caches. - local_value_cache - .get(unsafe { - // SAFETY - // - // We need to convert the lifetime to make the compiler happy. However, as - // we only use the `key` to looking up the value this lifetime conversion is - // safe. - std::mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>( - &key, - ) - }) - .or_else(|| { - shared_value_cache.get(&key).map(|v| { - shared_value_cache_access.insert(key.get_hash()); - v - }) - }) - }, - } - } - - /// Insert some new `value` under the given `key`. - fn insert(&mut self, key: &[u8], value: CachedValue) { - match self { - Self::Fresh(map) => { - map.insert(key.into(), value); - }, - Self::ForStorageRoot { local_value_cache, storage_root, .. } => { - local_value_cache.insert(ValueCacheKey::new_value(key, *storage_root), value); - }, - } - } -} - -/// The actual [`TrieCache`](trie_db::TrieCache) implementation. -/// -/// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to -/// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are -/// done. 
-pub struct TrieCache<'a, H: Hasher> { - shared_inner: RwLockReadGuard<'a, SharedTrieCacheInner>, - shared_node_cache_access: MutexGuard<'a, HashSet>, - local_cache: MutexGuard<'a, HashMap>>, - value_cache: ValueCache<'a, H::Out>, -} - -impl<'a, H: Hasher> TrieCache<'a, H> { - /// Merge this cache into the given [`LocalTrieCache`]. - /// - /// This function is only required to be called when this instance was created through - /// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given - /// `storage_root` is the new storage root that was obtained after finishing all operations - /// using the [`TrieDBMut`](trie_db::TrieDBMut). - pub fn merge_into(self, local: &LocalTrieCache, storage_root: H::Out) { - let cache = if let ValueCache::Fresh(cache) = self.value_cache { cache } else { return }; - - if !cache.is_empty() { - let mut value_cache = local.value_cache.lock(); - let partial_hash = ValueCacheKey::hash_partial_data(&storage_root); - - cache - .into_iter() - .map(|(k, v)| { - let hash = - ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k); - (ValueCacheKey::Value { storage_key: k, storage_root, hash }, v) - }) - .for_each(|(k, v)| { - value_cache.insert(k, v); - }); - } - } -} - -impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { - fn get_or_insert_node( - &mut self, - hash: H::Out, - fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, - ) -> trie_db::Result<&NodeOwned, H::Out, Error> { - if let Some(res) = self.shared_inner.node_cache().get(&hash) { - tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); - self.shared_node_cache_access.insert(hash); - return Ok(res) - } - - match self.local_cache.entry(hash) { - MapEntry::Occupied(res) => { - tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache"); - Ok(res.into_mut()) - }, - MapEntry::Vacant(vacant) => { - let node = (*fetch_node)(); - - tracing::trace!( - target: LOG_TARGET, - ?hash, - 
fetch_successful = node.is_ok(), - "Node not found, needed to fetch it." - ); - - Ok(vacant.insert(node?)) - }, - } - } - - fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned> { - if let Some(node) = self.shared_inner.node_cache().get(hash) { - tracing::trace!(target: LOG_TARGET, ?hash, "Getting node from shared cache"); - self.shared_node_cache_access.insert(*hash); - return Some(node) - } - - let res = self.local_cache.get(hash); - - tracing::trace!( - target: LOG_TARGET, - ?hash, - found = res.is_some(), - "Getting node from local cache" - ); - - res - } - - fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue> { - let res = self.value_cache.get(key, self.shared_inner.value_cache()); - - tracing::trace!( - target: LOG_TARGET, - key = ?sp_core::hexdisplay::HexDisplay::from(&key), - found = res.is_some(), - "Looked up value for key", - ); - - res - } - - fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue) { - tracing::trace!( - target: LOG_TARGET, - key = ?sp_core::hexdisplay::HexDisplay::from(&key), - "Caching value for key", - ); - - self.value_cache.insert(key.into(), data); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; - - type MemoryDB = crate::MemoryDB; - type Layout = crate::LayoutV1; - type Cache = super::SharedTrieCache; - type Recorder = crate::recorder::Recorder; - - const TEST_DATA: &[(&[u8], &[u8])] = - &[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])]; - const CACHE_SIZE_RAW: usize = 1024 * 10; - const CACHE_SIZE: CacheSize = CacheSize::Maximum(CACHE_SIZE_RAW); - - fn create_trie() -> (MemoryDB, TrieHash) { - let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - for (k, v) in TEST_DATA { - trie.insert(k, v).expect("Inserts data"); - } - } - - (db, root) - } - - #[test] - fn basic_cache_works() { - 
let (db, root) = create_trie(); - - let shared_cache = Cache::new(CACHE_SIZE); - let local_cache = shared_cache.local_cache(); - - { - let mut cache = local_cache.as_trie_db_cache(root); - let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); - } - - // Local cache wasn't dropped yet, so there should nothing in the shared caches. - assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty()); - assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty()); - - drop(local_cache); - - // Now we should have the cached items in the shared cache. - assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1); - let cached_data = shared_cache - .read_lock_inner() - .value_cache() - .lru - .peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root)) - .unwrap() - .clone(); - assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap()); - - let fake_data = Bytes::from(&b"fake_data"[..]); - - let local_cache = shared_cache.local_cache(); - shared_cache.write_lock_inner().value_cache_mut().lru.put( - ValueCacheKey::new_value(TEST_DATA[1].0, root), - (fake_data.clone(), Default::default()).into(), - ); - - { - let mut cache = local_cache.as_trie_db_cache(root); - let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - - // We should now get the "fake_data", because we inserted this manually to the cache. 
- assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap()); - } - } - - #[test] - fn trie_db_mut_cache_works() { - let (mut db, root) = create_trie(); - - let new_key = b"new_key".to_vec(); - // Use some long value to not have it inlined - let new_value = vec![23; 64]; - - let shared_cache = Cache::new(CACHE_SIZE); - let mut new_root = root; - - { - let local_cache = shared_cache.local_cache(); - - let mut cache = local_cache.as_trie_db_mut_cache(); - - { - let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) - .with_cache(&mut cache) - .build(); - - trie.insert(&new_key, &new_value).unwrap(); - } - - cache.merge_into(&local_cache, new_root); - } - - // After the local cache is dropped, all changes should have been merged back to the shared - // cache. - let cached_data = shared_cache - .read_lock_inner() - .value_cache() - .lru - .peek(&ValueCacheKey::new_value(new_key, new_root)) - .unwrap() - .clone(); - assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap()); - } - - #[test] - fn trie_db_cache_and_recorder_work_together() { - let (db, root) = create_trie(); - - let shared_cache = Cache::new(CACHE_SIZE); - - for i in 0..5 { - // Clear some of the caches. 
- if i == 2 { - shared_cache.reset_node_cache(); - } else if i == 3 { - shared_cache.reset_value_cache(); - } - - let local_cache = shared_cache.local_cache(); - let recorder = Recorder::default(); - - { - let mut cache = local_cache.as_trie_db_cache(root); - let mut recorder = recorder.as_trie_recorder(root); - let trie = TrieDBBuilder::::new(&db, &root) - .with_cache(&mut cache) - .with_recorder(&mut recorder) - .build(); - - for (key, value) in TEST_DATA { - assert_eq!(*value, trie.get(&key).unwrap().unwrap()); - } - } - - let storage_proof = recorder.drain_storage_proof(); - let memory_db: MemoryDB = storage_proof.into_memory_db(); - - { - let trie = TrieDBBuilder::::new(&memory_db, &root).build(); - - for (key, value) in TEST_DATA { - assert_eq!(*value, trie.get(&key).unwrap().unwrap()); - } - } - } - } - - #[test] - fn trie_db_mut_cache_and_recorder_work_together() { - const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])]; - - let (db, root) = create_trie(); - - let shared_cache = Cache::new(CACHE_SIZE); - - // Run this twice so that we use the data cache in the second run. - for i in 0..5 { - // Clear some of the caches. 
- if i == 2 { - shared_cache.reset_node_cache(); - } else if i == 3 { - shared_cache.reset_value_cache(); - } - - let recorder = Recorder::default(); - let local_cache = shared_cache.local_cache(); - let mut new_root = root; - - { - let mut db = db.clone(); - let mut cache = local_cache.as_trie_db_cache(root); - let mut recorder = recorder.as_trie_recorder(root); - let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) - .with_cache(&mut cache) - .with_recorder(&mut recorder) - .build(); - - for (key, value) in DATA_TO_ADD { - trie.insert(key, value).unwrap(); - } - } - - let storage_proof = recorder.drain_storage_proof(); - let mut memory_db: MemoryDB = storage_proof.into_memory_db(); - let mut proof_root = root; - - { - let mut trie = - TrieDBMutBuilder::::from_existing(&mut memory_db, &mut proof_root) - .build(); - - for (key, value) in DATA_TO_ADD { - trie.insert(key, value).unwrap(); - } - } - - assert_eq!(new_root, proof_root) - } - } - - #[test] - fn cache_lru_works() { - let (db, root) = create_trie(); - - let shared_cache = Cache::new(CACHE_SIZE); - - { - let local_cache = shared_cache.local_cache(); - - let mut cache = local_cache.as_trie_db_cache(root); - let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - - for (k, _) in TEST_DATA { - trie.get(k).unwrap().unwrap(); - } - } - - // Check that all items are there. - assert!(shared_cache - .read_lock_inner() - .value_cache() - .lru - .iter() - .map(|d| d.0) - .all(|l| TEST_DATA.iter().any(|d| l.storage_key().unwrap() == d.0))); - - // Run this in a loop. The first time we check that with the filled value cache, - // the expected values are at the top of the LRU. - // The second run is using an empty value cache to ensure that we access the nodes. 
- for _ in 0..2 { - { - let local_cache = shared_cache.local_cache(); - - let mut cache = local_cache.as_trie_db_cache(root); - let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - - for (k, _) in TEST_DATA.iter().take(2) { - trie.get(k).unwrap().unwrap(); - } - } - - // Ensure that the accessed items are most recently used items of the shared value - // cache. - assert!(shared_cache - .read_lock_inner() - .value_cache() - .lru - .iter() - .take(2) - .map(|d| d.0) - .all(|l| { TEST_DATA.iter().take(2).any(|d| l.storage_key().unwrap() == d.0) })); - - // Delete the value cache, so that we access the nodes. - shared_cache.reset_value_cache(); - } - - let most_recently_used_nodes = shared_cache - .read_lock_inner() - .node_cache() - .lru - .iter() - .map(|d| *d.0) - .collect::>(); - - { - let local_cache = shared_cache.local_cache(); - - let mut cache = local_cache.as_trie_db_cache(root); - let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - - for (k, _) in TEST_DATA.iter().skip(2) { - trie.get(k).unwrap().unwrap(); - } - } - - // Ensure that the most recently used nodes changed as well. - assert_ne!( - most_recently_used_nodes, - shared_cache - .read_lock_inner() - .node_cache() - .lru - .iter() - .map(|d| *d.0) - .collect::>() - ); - } - - #[test] - fn cache_respects_bounds() { - let (mut db, root) = create_trie(); - - let shared_cache = Cache::new(CACHE_SIZE); - { - let local_cache = shared_cache.local_cache(); - - let mut new_root = root; - - { - let mut cache = local_cache.as_trie_db_cache(root); - { - let mut trie = - TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) - .with_cache(&mut cache) - .build(); - - let value = vec![10u8; 100]; - // Ensure we add enough data that would overflow the cache. 
- for i in 0..CACHE_SIZE_RAW / 100 * 2 { - trie.insert(format!("key{}", i).as_bytes(), &value).unwrap(); - } - } - - cache.merge_into(&local_cache, new_root); - } - } - - let node_cache_size = shared_cache.read_lock_inner().node_cache().size_in_bytes; - let value_cache_size = shared_cache.read_lock_inner().value_cache().size_in_bytes; - - assert!(node_cache_size + value_cache_size < CACHE_SIZE_RAW); - } -} diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs deleted file mode 100644 index 9d4d36b83a28a..0000000000000 --- a/primitives/trie/src/cache/shared_cache.rs +++ /dev/null @@ -1,677 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -///! Provides the [`SharedNodeCache`], the [`SharedValueCache`] and the [`SharedTrieCache`] -///! that combines both caches and is exported to the outside. -use super::{CacheSize, LOG_TARGET}; -use hash_db::Hasher; -use hashbrown::{hash_set::Entry as SetEntry, HashSet}; -use lru::LruCache; -use nohash_hasher::BuildNoHashHasher; -use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use std::{ - hash::{BuildHasher, Hasher as _}, - mem, - sync::Arc, -}; -use trie_db::{node::NodeOwned, CachedValue}; - -lazy_static::lazy_static! 
{ - static ref RANDOM_STATE: ahash::RandomState = ahash::RandomState::default(); -} - -/// No hashing [`LruCache`]. -type NoHashingLruCache = LruCache>; - -/// The shared node cache. -/// -/// Internally this stores all cached nodes in a [`LruCache`]. It ensures that when updating the -/// cache, that the cache stays within its allowed bounds. -pub(super) struct SharedNodeCache { - /// The cached nodes, ordered by least recently used. - pub(super) lru: LruCache>, - /// The size of [`Self::lru`] in bytes. - pub(super) size_in_bytes: usize, - /// The maximum cache size of [`Self::lru`]. - maximum_cache_size: CacheSize, -} - -impl + Eq + std::hash::Hash> SharedNodeCache { - /// Create a new instance. - fn new(cache_size: CacheSize) -> Self { - Self { lru: LruCache::unbounded(), size_in_bytes: 0, maximum_cache_size: cache_size } - } - - /// Get the node for `key`. - /// - /// This doesn't change the least recently order in the internal [`LruCache`]. - pub fn get(&self, key: &H) -> Option<&NodeOwned> { - self.lru.peek(key) - } - - /// Update the cache with the `added` nodes and the `accessed` nodes. - /// - /// The `added` nodes are the ones that have been collected by doing operations on the trie and - /// now should be stored in the shared cache. The `accessed` nodes are only referenced by hash - /// and represent the nodes that were retrieved from this shared cache through [`Self::get`]. - /// These `accessed` nodes are being put to the front of the internal [`LruCache`] like the - /// `added` ones. - /// - /// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`] is - /// inside its bounds ([`Self::maximum_size_in_bytes`]). 
- pub fn update( - &mut self, - added: impl IntoIterator)>, - accessed: impl IntoIterator, - ) { - let update_size_in_bytes = |size_in_bytes: &mut usize, key: &H, node: &NodeOwned| { - if let Some(new_size_in_bytes) = - size_in_bytes.checked_sub(key.as_ref().len() + node.size_in_bytes()) - { - *size_in_bytes = new_size_in_bytes; - } else { - *size_in_bytes = 0; - tracing::error!(target: LOG_TARGET, "`SharedNodeCache` underflow detected!",); - } - }; - - accessed.into_iter().for_each(|key| { - // Access every node in the lru to put it to the front. - self.lru.get(&key); - }); - added.into_iter().for_each(|(key, node)| { - self.size_in_bytes += key.as_ref().len() + node.size_in_bytes(); - - if let Some((r_key, r_node)) = self.lru.push(key, node) { - update_size_in_bytes(&mut self.size_in_bytes, &r_key, &r_node); - } - - // Directly ensure that we respect the maximum size. By doing it directly here we ensure - // that the internal map of the [`LruCache`] doesn't grow too much. - while self.maximum_cache_size.exceeds(self.size_in_bytes) { - // This should always be `Some(_)`, otherwise something is wrong! - if let Some((key, node)) = self.lru.pop_lru() { - update_size_in_bytes(&mut self.size_in_bytes, &key, &node); - } - } - }); - } - - /// Reset the cache. - fn reset(&mut self) { - self.size_in_bytes = 0; - self.lru.clear(); - } -} - -/// The hash of [`ValueCacheKey`]. 
-#[derive(Eq, Clone, Copy)] -pub struct ValueCacheKeyHash(u64); - -impl ValueCacheKeyHash { - pub fn from_hasher_and_storage_key( - mut hasher: impl std::hash::Hasher, - storage_key: &[u8], - ) -> Self { - hasher.write(storage_key); - - Self(hasher.finish()) - } -} - -impl PartialEq for ValueCacheKeyHash { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} - -impl std::hash::Hash for ValueCacheKeyHash { - fn hash(&self, state: &mut Hasher) { - state.write_u64(self.0); - } -} - -impl nohash_hasher::IsEnabled for ValueCacheKeyHash {} - -/// A type that can only be constructed inside of this file. -/// -/// It "requires" that the user has read the docs to prevent fuck ups. -#[derive(Eq, PartialEq)] -pub(super) struct IReadTheDocumentation(()); - -/// The key type that is being used to address a [`CachedValue`]. -/// -/// This type is implemented as `enum` to improve the performance when accessing the value cache. -/// The problem being that we need to calculate the `hash` of [`Self`] in worst case three times -/// when trying to find a value in the value cache. First to lookup the local cache, then the shared -/// cache and if we found it in the shared cache a third time to insert it into the list of accessed -/// values. To work around each variant stores the `hash` to identify a unique combination of -/// `storage_key` and `storage_root`. However, be aware that this `hash` can lead to collisions when -/// there are two different `storage_key` and `storage_root` pairs that map to the same `hash`. This -/// type also has the `Hash` variant. This variant should only be used for the use case of updating -/// the lru for a key. Because when using only the `Hash` variant to getting a value from a hash map -/// it could happen that a wrong value is returned when there is another key in the same hash map -/// that maps to the same `hash`. 
The [`PartialEq`] implementation is written in a way that when one -/// of the two compared instances is the `Hash` variant, we will only compare the hashes. This -/// ensures that we can use the `Hash` variant to bring values up in the lru. -#[derive(Eq)] -pub(super) enum ValueCacheKey<'a, H> { - /// Variant that stores the `storage_key` by value. - Value { - /// The storage root of the trie this key belongs to. - storage_root: H, - /// The key to access the value in the storage. - storage_key: Arc<[u8]>, - /// The hash that identifying this instance of `storage_root` and `storage_key`. - hash: ValueCacheKeyHash, - }, - /// Variant that only references the `storage_key`. - Ref { - /// The storage root of the trie this key belongs to. - storage_root: H, - /// The key to access the value in the storage. - storage_key: &'a [u8], - /// The hash that identifying this instance of `storage_root` and `storage_key`. - hash: ValueCacheKeyHash, - }, - /// Variant that only stores the hash that represents the `storage_root` and `storage_key`. - /// - /// This should be used by caution, because it can lead to accessing the wrong value in a - /// hash map/set when there exists two different `storage_root`s and `storage_key`s that - /// map to the same `hash`. - Hash { hash: ValueCacheKeyHash, _i_read_the_documentation: IReadTheDocumentation }, -} - -impl<'a, H> ValueCacheKey<'a, H> { - /// Constructs [`Self::Value`]. - pub fn new_value(storage_key: impl Into>, storage_root: H) -> Self - where - H: AsRef<[u8]>, - { - let storage_key = storage_key.into(); - let hash = Self::hash_data(&storage_key, &storage_root); - Self::Value { storage_root, storage_key, hash } - } - - /// Constructs [`Self::Ref`]. 
- pub fn new_ref(storage_key: &'a [u8], storage_root: H) -> Self - where - H: AsRef<[u8]>, - { - let storage_key = storage_key.into(); - let hash = Self::hash_data(storage_key, &storage_root); - Self::Ref { storage_root, storage_key, hash } - } - - /// Returns a hasher prepared to build the final hash to identify [`Self`]. - /// - /// See [`Self::hash_data`] for building the hash directly. - pub fn hash_partial_data(storage_root: &H) -> impl std::hash::Hasher + Clone - where - H: AsRef<[u8]>, - { - let mut hasher = RANDOM_STATE.build_hasher(); - hasher.write(storage_root.as_ref()); - hasher - } - - /// Hash the `key` and `storage_root` that identify [`Self`]. - /// - /// Returns a `u64` which represents the unique hash for the given inputs. - pub fn hash_data(key: &[u8], storage_root: &H) -> ValueCacheKeyHash - where - H: AsRef<[u8]>, - { - let hasher = Self::hash_partial_data(storage_root); - - ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key) - } - - /// Returns the `hash` that identifies the current instance. - pub fn get_hash(&self) -> ValueCacheKeyHash { - match self { - Self::Value { hash, .. } | Self::Ref { hash, .. } | Self::Hash { hash, .. } => *hash, - } - } - - /// Returns the stored storage root. - pub fn storage_root(&self) -> Option<&H> { - match self { - Self::Value { storage_root, .. } | Self::Ref { storage_root, .. } => Some(storage_root), - Self::Hash { .. } => None, - } - } - - /// Returns the stored storage key. - pub fn storage_key(&self) -> Option<&[u8]> { - match self { - Self::Ref { storage_key, .. } => Some(&storage_key), - Self::Value { storage_key, .. } => Some(storage_key), - Self::Hash { .. } => None, - } - } -} - -// Implement manually to ensure that the `Value` and `Hash` are treated equally. 
-impl std::hash::Hash for ValueCacheKey<'_, H> { - fn hash(&self, state: &mut Hasher) { - self.get_hash().hash(state) - } -} - -impl nohash_hasher::IsEnabled for ValueCacheKey<'_, H> {} - -// Implement manually to ensure that the `Value` and `Hash` are treated equally. -impl PartialEq for ValueCacheKey<'_, H> { - fn eq(&self, other: &Self) -> bool { - // First check if `self` or `other` is only the `Hash`. - // Then we only compare the `hash`. So, there could actually be some collision - // if two different storage roots and keys are mapping to the same key. See the - // [`ValueCacheKey`] docs for more information. - match (self, other) { - (Self::Hash { hash, .. }, Self::Hash { hash: other_hash, .. }) => hash == other_hash, - (Self::Hash { hash, .. }, _) => *hash == other.get_hash(), - (_, Self::Hash { hash: other_hash, .. }) => self.get_hash() == *other_hash, - // If both are not the `Hash` variant, we compare all the values. - _ => - self.get_hash() == other.get_hash() && - self.storage_root() == other.storage_root() && - self.storage_key() == other.storage_key(), - } - } -} - -/// The shared value cache. -/// -/// The cache ensures that it stays in the configured size bounds. -pub(super) struct SharedValueCache { - /// The cached nodes, ordered by least recently used. - pub(super) lru: NoHashingLruCache, CachedValue>, - /// The size of [`Self::lru`] in bytes. - pub(super) size_in_bytes: usize, - /// The maximum cache size of [`Self::lru`]. - maximum_cache_size: CacheSize, - /// All known storage keys that are stored in [`Self::lru`]. - /// - /// This is used to de-duplicate keys in memory that use the - /// same [`SharedValueCache::storage_key`], but have a different - /// [`SharedValueCache::storage_root`]. - known_storage_keys: HashSet>, -} - -impl> SharedValueCache { - /// Create a new instance. 
- fn new(cache_size: CacheSize) -> Self { - Self { - lru: NoHashingLruCache::unbounded_with_hasher(Default::default()), - size_in_bytes: 0, - maximum_cache_size: cache_size, - known_storage_keys: Default::default(), - } - } - - /// Get the [`CachedValue`] for `key`. - /// - /// This doesn't change the least recently order in the internal [`LruCache`]. - pub fn get<'a>(&'a self, key: &ValueCacheKey) -> Option<&'a CachedValue> { - debug_assert!( - !matches!(key, ValueCacheKey::Hash { .. }), - "`get` can not be called with `Hash` variant as this may returns the wrong value." - ); - - self.lru.peek(unsafe { - // SAFETY - // - // We need to convert the lifetime to make the compiler happy. However, as - // we only use the `key` to looking up the value this lifetime conversion is - // safe. - mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>(key) - }) - } - - /// Update the cache with the `added` values and the `accessed` values. - /// - /// The `added` values are the ones that have been collected by doing operations on the trie and - /// now should be stored in the shared cache. The `accessed` values are only referenced by the - /// [`ValueCacheKeyHash`] and represent the values that were retrieved from this shared cache - /// through [`Self::get`]. These `accessed` values are being put to the front of the internal - /// [`LruCache`] like the `added` ones. - /// - /// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`] is - /// inside its bounds ([`Self::maximum_size_in_bytes`]). - pub fn update( - &mut self, - added: impl IntoIterator, CachedValue)>, - accessed: impl IntoIterator, - ) { - // The base size in memory per ([`ValueCacheKey`], [`CachedValue`]). 
- let base_size = mem::size_of::>() + mem::size_of::>(); - let known_keys_entry_size = mem::size_of::>(); - - let update_size_in_bytes = - |size_in_bytes: &mut usize, r_key: Arc<[u8]>, known_keys: &mut HashSet>| { - // If the `strong_count == 2`, it means this is the last instance of the key. - // One being `r_key` and the other being stored in `known_storage_keys`. - let last_instance = Arc::strong_count(&r_key) == 2; - - let key_len = if last_instance { - known_keys.remove(&r_key); - r_key.len() + known_keys_entry_size - } else { - // The key is still in `keys`, because it is still used by another - // `ValueCacheKey`. - 0 - }; - - if let Some(new_size_in_bytes) = size_in_bytes.checked_sub(key_len + base_size) { - *size_in_bytes = new_size_in_bytes; - } else { - *size_in_bytes = 0; - tracing::error!(target: LOG_TARGET, "`SharedValueCache` underflow detected!",); - } - }; - - accessed.into_iter().for_each(|key| { - // Access every node in the lru to put it to the front. - // As we are using the `Hash` variant here, it may leads to putting the wrong value to - // the top. However, the only consequence of this is that we may prune a recently used - // value to early. - self.lru.get(&ValueCacheKey::Hash { - hash: key, - _i_read_the_documentation: IReadTheDocumentation(()), - }); - }); - - added.into_iter().for_each(|(key, value)| { - let (storage_root, storage_key, key_hash) = match key { - ValueCacheKey::Hash { .. } => { - // Ignore the hash variant and try the next. - tracing::error!( - target: LOG_TARGET, - "`SharedValueCached::update` was called with a key to add \ - that uses the `Hash` variant. 
This would lead to potential hash collision!", - ); - return - }, - ValueCacheKey::Ref { storage_key, storage_root, hash } => - (storage_root, storage_key.into(), hash), - ValueCacheKey::Value { storage_root, storage_key, hash } => - (storage_root, storage_key, hash), - }; - - let (size_update, storage_key) = - match self.known_storage_keys.entry(storage_key.clone()) { - SetEntry::Vacant(v) => { - let len = v.get().len(); - v.insert(); - - // If the key was unknown, we need to also take its length and the size of - // the entry of `known_keys` into account. - (len + base_size + known_keys_entry_size, storage_key) - }, - SetEntry::Occupied(o) => { - // Key is known - (base_size, o.get().clone()) - }, - }; - - self.size_in_bytes += size_update; - - if let Some((r_key, _)) = self - .lru - .push(ValueCacheKey::Value { storage_key, storage_root, hash: key_hash }, value) - { - if let ValueCacheKey::Value { storage_key, .. } = r_key { - update_size_in_bytes( - &mut self.size_in_bytes, - storage_key, - &mut self.known_storage_keys, - ); - } - } - - // Directly ensure that we respect the maximum size. By doing it directly here we - // ensure that the internal map of the [`LruCache`] doesn't grow too much. - while self.maximum_cache_size.exceeds(self.size_in_bytes) { - // This should always be `Some(_)`, otherwise something is wrong! - if let Some((r_key, _)) = self.lru.pop_lru() { - if let ValueCacheKey::Value { storage_key, .. } = r_key { - update_size_in_bytes( - &mut self.size_in_bytes, - storage_key, - &mut self.known_storage_keys, - ); - } - } - } - }); - } - - /// Reset the cache. - fn reset(&mut self) { - self.size_in_bytes = 0; - self.lru.clear(); - self.known_storage_keys.clear(); - } -} - -/// The inner of [`SharedTrieCache`]. -pub(super) struct SharedTrieCacheInner { - node_cache: SharedNodeCache, - value_cache: SharedValueCache, -} - -impl SharedTrieCacheInner { - /// Returns a reference to the [`SharedValueCache`]. 
- pub(super) fn value_cache(&self) -> &SharedValueCache { - &self.value_cache - } - - /// Returns a mutable reference to the [`SharedValueCache`]. - pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache { - &mut self.value_cache - } - - /// Returns a reference to the [`SharedNodeCache`]. - pub(super) fn node_cache(&self) -> &SharedNodeCache { - &self.node_cache - } - - /// Returns a mutable reference to the [`SharedNodeCache`]. - pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache { - &mut self.node_cache - } -} - -/// The shared trie cache. -/// -/// It should be instantiated once per node. It will hold the trie nodes and values of all -/// operations to the state. To not use all available memory it will ensure to stay in the -/// bounds given via the [`CacheSize`] at startup. -/// -/// The instance of this object can be shared between multiple threads. -pub struct SharedTrieCache { - inner: Arc>>, -} - -impl Clone for SharedTrieCache { - fn clone(&self) -> Self { - Self { inner: self.inner.clone() } - } -} - -impl SharedTrieCache { - /// Create a new [`SharedTrieCache`]. - pub fn new(cache_size: CacheSize) -> Self { - let (node_cache_size, value_cache_size) = match cache_size { - CacheSize::Maximum(max) => { - // Allocate 20% for the value cache. - let value_cache_size_in_bytes = (max as f32 * 0.20) as usize; - - ( - CacheSize::Maximum(max - value_cache_size_in_bytes), - CacheSize::Maximum(value_cache_size_in_bytes), - ) - }, - CacheSize::Unlimited => (CacheSize::Unlimited, CacheSize::Unlimited), - }; - - Self { - inner: Arc::new(RwLock::new(SharedTrieCacheInner { - node_cache: SharedNodeCache::new(node_cache_size), - value_cache: SharedValueCache::new(value_cache_size), - })), - } - } - - /// Create a new [`LocalTrieCache`](super::LocalTrieCache) instance from this shared cache. 
- pub fn local_cache(&self) -> super::LocalTrieCache { - super::LocalTrieCache { - shared: self.clone(), - node_cache: Default::default(), - value_cache: Default::default(), - shared_node_cache_access: Default::default(), - shared_value_cache_access: Default::default(), - } - } - - /// Returns the used memory size of this cache in bytes. - pub fn used_memory_size(&self) -> usize { - let inner = self.inner.read(); - let value_cache_size = inner.value_cache.size_in_bytes; - let node_cache_size = inner.node_cache.size_in_bytes; - - node_cache_size + value_cache_size - } - - /// Reset the node cache. - pub fn reset_node_cache(&self) { - self.inner.write().node_cache.reset(); - } - - /// Reset the value cache. - pub fn reset_value_cache(&self) { - self.inner.write().value_cache.reset(); - } - - /// Reset the entire cache. - pub fn reset(&self) { - self.reset_node_cache(); - self.reset_value_cache(); - } - - /// Returns the read locked inner. - pub(super) fn read_lock_inner(&self) -> RwLockReadGuard<'_, SharedTrieCacheInner> { - self.inner.read() - } - - /// Returns the write locked inner. 
- pub(super) fn write_lock_inner(&self) -> RwLockWriteGuard<'_, SharedTrieCacheInner> { - self.inner.write() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::H256 as Hash; - - #[test] - fn shared_value_cache_works() { - let base_size = mem::size_of::>() + mem::size_of::>(); - let arc_size = mem::size_of::>(); - - let mut cache = SharedValueCache::::new(CacheSize::Maximum( - (base_size + arc_size + 10) * 10, - )); - - let key = vec![0; 10]; - - let root0 = Hash::repeat_byte(1); - let root1 = Hash::repeat_byte(2); - - cache.update( - vec![ - (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting), - (ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting), - ], - vec![], - ); - - // Ensure that the basics are working - assert_eq!(1, cache.known_storage_keys.len()); - assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); - assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes); - - // Just accessing a key should not change anything on the size and number of entries. - cache.update(vec![], vec![ValueCacheKey::hash_data(&key[..], &root0)]); - assert_eq!(1, cache.known_storage_keys.len()); - assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); - assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes); - - // Add 9 other entries and this should move out the key for `root1`. 
- cache.update( - (1..10) - .map(|i| vec![i; 10]) - .map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)), - vec![], - ); - - assert_eq!(10, cache.known_storage_keys.len()); - assert_eq!(2, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); - assert_eq!((base_size + key.len() + arc_size) * 10, cache.size_in_bytes); - assert!(matches!( - cache.get(&ValueCacheKey::new_ref(&key, root0)).unwrap(), - CachedValue::::NonExisting - )); - assert!(cache.get(&ValueCacheKey::new_ref(&key, root1)).is_none()); - - cache.update( - vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)], - vec![], - ); - - assert!(cache.known_storage_keys.get(&key[..]).is_none()); - } - - #[test] - fn value_cache_key_eq_works() { - let storage_key = &b"something"[..]; - let storage_key2 = &b"something2"[..]; - let storage_root = Hash::random(); - - let value = ValueCacheKey::new_value(storage_key, storage_root); - // Ref gets the same hash, but a different storage key - let ref_ = - ValueCacheKey::Ref { storage_root, storage_key: storage_key2, hash: value.get_hash() }; - let hash = ValueCacheKey::Hash { - hash: value.get_hash(), - _i_read_the_documentation: IReadTheDocumentation(()), - }; - - // Ensure that the hash variants is equal to `value`, `ref_` and itself. - assert!(hash == value); - assert!(value == hash); - assert!(hash == ref_); - assert!(ref_ == hash); - assert!(hash == hash); - - // But when we compare `value` and `ref_` the different storage key is detected. - assert!(value != ref_); - assert!(ref_ != value); - } -} diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs index a781d408e994f..e0b3642b6db76 100644 --- a/primitives/trie/src/error.rs +++ b/primitives/trie/src/error.rs @@ -15,33 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::{boxed::Box, vec::Vec}; - -/// Error type used for trie related errors. 
+/// Error for trie node decoding. #[derive(Debug, PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(thiserror::Error))] -pub enum Error { +pub enum Error { #[cfg_attr(feature = "std", error("Bad format"))] BadFormat, #[cfg_attr(feature = "std", error("Decoding failed: {0}"))] Decode(#[cfg_attr(feature = "std", source)] codec::Error), - #[cfg_attr( - feature = "std", - error("Recorded key ({0:x?}) access with value as found={1}, but could not confirm with trie.") - )] - InvalidRecording(Vec, bool), - #[cfg_attr(feature = "std", error("Trie error: {0:?}"))] - TrieError(Box>), } -impl From for Error { +impl From for Error { fn from(x: codec::Error) -> Self { Error::Decode(x) } } - -impl From>> for Error { - fn from(x: Box>) -> Self { - Error::TrieError(x) - } -} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index d036db7b1fecd..7a17d44aa5b69 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -19,13 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -pub mod cache; mod error; mod node_codec; mod node_header; -#[cfg(feature = "std")] -pub mod recorder; mod storage_proof; mod trie_codec; mod trie_stream; @@ -50,17 +46,17 @@ use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::{ nibble_ops, node::{NodePlan, ValuePlan}, - CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator, - TrieDBKeyIterator, TrieLayout, TrieMut, TrieRecorder, + CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, + TrieLayout, TrieMut, }; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; /// substrate trie layout -pub struct LayoutV0(PhantomData); +pub struct LayoutV0(sp_std::marker::PhantomData); /// substrate trie layout, with external value nodes. 
-pub struct LayoutV1(PhantomData); +pub struct LayoutV1(sp_std::marker::PhantomData); impl TrieLayout for LayoutV0 where @@ -171,15 +167,11 @@ pub type MemoryDB = memory_db::MemoryDB, trie_db::DB pub type GenericMemoryDB = memory_db::MemoryDB; /// Persistent trie database read-access interface for the a given hasher. -pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>; -/// Builder for creating a [`TrieDB`]. -pub type TrieDBBuilder<'a, 'cache, L> = trie_db::TrieDBBuilder<'a, 'cache, L>; +pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; -/// Builder for creating a [`TrieDBMut`]. -pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>; /// Querying interface, as in `trie_db` but less generic. -pub type Lookup<'a, 'cache, L, Q> = trie_db::Lookup<'a, 'cache, L, Q>; +pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. @@ -188,23 +180,18 @@ pub mod trie_types { use super::*; /// Persistent trie database read-access interface for the a given hasher. - /// /// Read only V1 and V0 are compatible, thus we always use V1. - pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1>; - /// Builder for creating a [`TrieDB`]. - pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1>; + pub type TrieDB<'a, H> = super::TrieDB<'a, LayoutV1>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0>; - /// Builder for creating a [`TrieDBMutV0`]. - pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0>; /// Persistent trie database write-access interface for the a given hasher. 
pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1>; - /// Builder for creating a [`TrieDBMutV1`]. - pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1>; /// Querying interface, as in `trie_db` but less generic. - pub type Lookup<'a, 'cache, H, Q> = trie_db::Lookup<'a, 'cache, LayoutV1, Q>; + pub type LookupV0<'a, H, Q> = trie_db::Lookup<'a, LayoutV0, Q>; + /// Querying interface, as in `trie_db` but less generic. + pub type LookupV1<'a, H, Q> = trie_db::Lookup<'a, LayoutV1, Q>; /// As in `trie_db`, but less generic, error type for the crate. - pub type TrieError = trie_db::TrieError>; + pub type TrieError = trie_db::TrieError; } /// Create a proof for a subset of keys in a trie. @@ -226,7 +213,9 @@ where K: 'a + AsRef<[u8]>, DB: hash_db::HashDBRef, { - generate_proof::<_, L, _, _>(db, &root, keys) + // Can use default layout (read only). + let trie = TrieDB::::new(db, &root)?; + generate_proof(&trie, keys) } /// Verify a set of key-value pairs against a trie root and a proof. @@ -256,8 +245,6 @@ pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, delta: I, - recorder: Option<&mut dyn trie_db::TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, ) -> Result, Box>> where I: IntoIterator, @@ -267,10 +254,7 @@ where DB: hash_db::HashDB, { { - let mut trie = TrieDBMutBuilder::::from_existing(db, &mut root) - .with_optional_cache(cache) - .with_optional_recorder(recorder) - .build(); + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); @@ -287,32 +271,33 @@ where } /// Read a value from the trie. 
-pub fn read_trie_value>( +pub fn read_trie_value( db: &DB, root: &TrieHash, key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result>, Box>> { - TrieDBBuilder::::new(db, root) - .with_optional_cache(cache) - .with_optional_recorder(recorder) - .build() - .get(key) +) -> Result>, Box>> +where + L: TrieConfiguration, + DB: hash_db::HashDBRef, +{ + TrieDB::::new(db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the trie with given Query. -pub fn read_trie_value_with< - L: TrieLayout, - Q: Query>, - DB: hash_db::HashDBRef, ->( +pub fn read_trie_value_with( db: &DB, root: &TrieHash, key: &[u8], query: Q, -) -> Result>, Box>> { - TrieDBBuilder::::new(db, root).build().get_with(key, query) +) -> Result>, Box>> +where + L: TrieConfiguration, + Q: Query, + DB: hash_db::HashDBRef, +{ + TrieDB::::new(db, root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec())) } /// Determine the empty trie root. @@ -343,8 +328,6 @@ pub fn child_delta_trie_root( db: &mut DB, root_data: RD, delta: I, - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, ) -> Result<::Out, Box>> where I: IntoIterator, @@ -358,49 +341,46 @@ where // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); - let mut db = KeySpacedDBMut::new(db, keyspace); - delta_trie_root::(&mut db, root, delta, recorder, cache) + let mut db = KeySpacedDBMut::new(&mut *db, keyspace); + delta_trie_root::(&mut db, root, delta) } -/// Read a value from the child trie. -pub fn read_child_trie_value( - keyspace: &[u8], +/// Record all keys for a given root. 
+pub fn record_all_keys( db: &DB, root: &TrieHash, - key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result>, Box>> + recorder: &mut Recorder>, +) -> Result<(), Box>> where DB: hash_db::HashDBRef, { - let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) - .with_optional_recorder(recorder) - .with_optional_cache(cache) - .build() - .get(key) - .map(|x| x.map(|val| val.to_vec())) + let trie = TrieDB::::new(db, root)?; + let iter = trie.iter()?; + + for x in iter { + let (key, _) = x?; + + // there's currently no API like iter_with() + // => use iter to enumerate all keys AND lookup each + // key using get_with + trie.get_with(&key, &mut *recorder)?; + } + + Ok(()) } -/// Read a hash from the child trie. -pub fn read_child_trie_hash( +/// Read a value from the child trie. +pub fn read_child_trie_value( keyspace: &[u8], db: &DB, root: &TrieHash, key: &[u8], - recorder: Option<&mut dyn TrieRecorder>>, - cache: Option<&mut dyn TrieCache>, -) -> Result>, Box>> +) -> Result>, Box>> where DB: hash_db::HashDBRef, { let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) - .with_optional_recorder(recorder) - .with_optional_cache(cache) - .build() - .get_hash(key) + TrieDB::::new(&db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the child trie with given query. @@ -421,21 +401,20 @@ where root.as_mut().copy_from_slice(root_slice); let db = KeySpacedDB::new(db, keyspace); - TrieDBBuilder::::new(&db, &root) - .build() + TrieDB::::new(&db, &root)? .get_with(key, query) .map(|x| x.map(|val| val.to_vec())) } /// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the /// prefix of every key value. 
-pub struct KeySpacedDB<'a, DB: ?Sized, H>(&'a DB, &'a [u8], PhantomData); +pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); /// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the /// prefix of every key value. /// /// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB: ?Sized, H>(&'a mut DB, &'a [u8], PhantomData); +pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); /// Utility function used to merge some byte data (keyspace) and `prefix` data /// before calling key value database primitives. @@ -446,14 +425,20 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) (result, prefix.1) } -impl<'a, DB: ?Sized, H> KeySpacedDB<'a, DB, H> { +impl<'a, DB, H> KeySpacedDB<'a, DB, H> +where + H: Hasher, +{ /// instantiate new keyspaced db pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { KeySpacedDB(db, ks, PhantomData) } } -impl<'a, DB: ?Sized, H> KeySpacedDBMut<'a, DB, H> { +impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> +where + H: Hasher, +{ /// instantiate new keyspaced db pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { KeySpacedDBMut(db, ks, PhantomData) @@ -462,7 +447,7 @@ impl<'a, DB: ?Sized, H> KeySpacedDBMut<'a, DB, H> { impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef + ?Sized, + DB: hash_db::HashDBRef, H: Hasher, T: From<&'static [u8]>, { @@ -527,6 +512,7 @@ where /// Constants used into trie simplification codec. 
mod trie_constants { const FIRST_PREFIX: u8 = 0b_00 << 6; + pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; @@ -541,6 +527,7 @@ mod tests { use super::*; use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; + use hex_literal::hex; use sp_core::Blake2Hasher; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; @@ -563,7 +550,7 @@ mod tests { let persistent = { let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); - let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -577,13 +564,13 @@ mod tests { let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); { - let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - let t = TrieDBBuilder::::new(&memdb, &root).build(); + let t = TrieDB::::new(&memdb, &root).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter() @@ -605,7 +592,7 @@ mod tests { fn default_trie_root() { let mut db = MemoryDB::default(); let mut root = TrieHash::::default(); - let mut empty = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); let root2: Vec = LayoutV1::trie_root::<_, Vec, Vec>(std::iter::empty()) @@ -708,12 +695,15 @@ mod tests { check_input(&input); } - fn populate_trie<'db, T: TrieConfiguration>( + fn populate_trie<'db, T>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, v: &[(Vec, Vec)], - ) -> TrieDBMut<'db, T> { - let mut t = 
TrieDBMutBuilder::::new(db, root).build(); + ) -> TrieDBMut<'db, T> + where + T: TrieConfiguration, + { + let mut t = TrieDBMut::::new(db, root); for i in 0..v.len() { let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; @@ -843,27 +833,21 @@ mod tests { } fn iterator_works_inner() { let pairs = vec![ - ( - array_bytes::hex2bytes_unchecked("0103000000000000000464"), - array_bytes::hex2bytes_unchecked("0400000000"), - ), - ( - array_bytes::hex2bytes_unchecked("0103000000000000000469"), - array_bytes::hex2bytes_unchecked("0401000000"), - ), + (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), + (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), ]; let mut mdb = MemoryDB::default(); let mut root = Default::default(); let _ = populate_trie::(&mut mdb, &mut root, &pairs); - let trie = TrieDBBuilder::::new(&mdb, &root).build(); + let trie = TrieDB::::new(&mdb, &root).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); for pair in iter { let (key, value) = pair.unwrap(); - iter_pairs.push((key, value)); + iter_pairs.push((key, value.to_vec())); } assert_eq!(pairs, iter_pairs); @@ -872,15 +856,15 @@ mod tests { #[test] fn proof_non_inclusion_works() { let pairs = vec![ - (array_bytes::hex2bytes_unchecked("0102"), array_bytes::hex2bytes_unchecked("01")), - (array_bytes::hex2bytes_unchecked("0203"), array_bytes::hex2bytes_unchecked("0405")), + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), ]; let mut memdb = MemoryDB::default(); let mut root = Default::default(); populate_trie::(&mut memdb, &mut root, &pairs); - let non_included_key: Vec = array_bytes::hex2bytes_unchecked("0909"); + let non_included_key: Vec = hex!("0909").to_vec(); let proof = generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) .unwrap(); @@ -897,7 +881,7 @@ mod tests { assert!(verify_trie_proof::>( &root, &proof, - &[(non_included_key, 
Some(array_bytes::hex2bytes_unchecked("1010")))], + &[(non_included_key, Some(hex!("1010").to_vec()))], ) .is_err()); } @@ -905,8 +889,8 @@ mod tests { #[test] fn proof_inclusion_works() { let pairs = vec![ - (array_bytes::hex2bytes_unchecked("0102"), array_bytes::hex2bytes_unchecked("01")), - (array_bytes::hex2bytes_unchecked("0203"), array_bytes::hex2bytes_unchecked("0405")), + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), ]; let mut memdb = MemoryDB::default(); @@ -936,7 +920,7 @@ mod tests { assert!(verify_trie_proof::( &root, &proof, - &[(array_bytes::hex2bytes_unchecked("4242"), Some(pairs[0].1.clone()))] + &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] ) .is_err()); @@ -970,45 +954,15 @@ mod tests { &mut proof_db.clone(), storage_root, valid_delta, - None, - None, ) .unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, - None, - None, ) .unwrap(); assert_eq!(first_storage_root, second_storage_root); } - - #[test] - fn big_key() { - let check = |keysize: usize| { - let mut memdb = PrefixedMemoryDB::::default(); - let mut root = Default::default(); - let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); - t.insert(&vec![0x01u8; keysize][..], &[0x01u8, 0x23]).unwrap(); - std::mem::drop(t); - let t = TrieDBBuilder::::new(&memdb, &root).build(); - assert_eq!(t.get(&vec![0x01u8; keysize][..]).unwrap(), Some(vec![0x01u8, 0x23])); - }; - check(u16::MAX as usize / 2); // old limit - check(u16::MAX as usize / 2 + 1); // value over old limit still works - } - - #[test] - fn node_with_no_children_fail_decoding() { - let branch = NodeCodec::::branch_node_nibbled( - b"some_partial".iter().copied(), - 24, - vec![None; 16].into_iter(), - Some(trie_db::node::Value::Inline(b"value"[..].into())), - ); - assert!(NodeCodec::::decode(branch.as_slice()).is_err()); - } } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 
f632320dd296d..bd0ba27483e66 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -25,7 +25,7 @@ use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; use trie_db::{ nibble_ops, node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan}, - ChildReference, NodeCodec as NodeCodecT, + ChildReference, NodeCodec as NodeCodecT, Partial, }; /// Helper struct for trie node decoder. This implements `codec::Input` on a byte slice, while @@ -85,7 +85,7 @@ where H: Hasher, { const ESCAPE_HEADER: Option = Some(trie_constants::ESCAPE_COMPACT_HEADER); - type Error = Error; + type Error = Error; type HashOut = H::Out; fn hashed_null_node() -> ::Out { @@ -185,19 +185,19 @@ where &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: impl Iterator, number_nibble: usize, value: Value) -> Vec { + fn leaf_node(partial: Partial, value: Value) -> Vec { let contains_hash = matches!(&value, Value::Node(..)); let mut output = if contains_hash { - partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf) + partial_encode(partial, NodeKind::HashedValueLeaf) } else { - partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf) + partial_encode(partial, NodeKind::Leaf) }; match value { Value::Inline(value) => { Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Value::Node(hash) => { + Value::Node(hash, _) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -244,7 +244,7 @@ where Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Some(Value::Node(hash)) => { + Some(Value::Node(hash, _)) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -279,6 +279,8 @@ fn partial_from_iterator_encode>( nibble_count: usize, node_kind: NodeKind, ) -> Vec { + let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); + let mut output = 
Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); match node_kind { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), @@ -293,6 +295,31 @@ fn partial_from_iterator_encode>( output } +/// Encode and allocate node type header (type and size), and partial value. +/// Same as `partial_from_iterator_encode` but uses non encoded `Partial` as input. +fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { + let number_nibble_encoded = (partial.0).0 as usize; + let nibble_count = partial.1.len() * nibble_ops::NIBBLE_PER_BYTE + number_nibble_encoded; + + let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); + + let mut output = Vec::with_capacity(4 + partial.1.len()); + match node_kind { + NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), + NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), + NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), + NodeKind::HashedValueLeaf => + NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output), + NodeKind::HashedValueBranch => + NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output), + }; + if number_nibble_encoded > 0 { + output.push(nibble_ops::pad_right((partial.0).1)); + } + output.extend_from_slice(partial.1); + output +} + const BITMAP_LENGTH: usize = 2; /// Radix 16 trie, bitmap encoding implementation, @@ -302,13 +329,8 @@ const BITMAP_LENGTH: usize = 2; pub(crate) struct Bitmap(u16); impl Bitmap { - pub fn decode(data: &[u8]) -> Result { - let value = u16::decode(&mut &data[..])?; - if value == 0 { - Err("Bitmap without a child.".into()) - } else { - Ok(Bitmap(value)) - } + pub fn decode(mut data: &[u8]) -> Result { + Ok(Bitmap(u16::decode(&mut data)?)) } pub fn value_at(&self, i: usize) -> bool { diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index f3544be65b2e9..c2c9510c5ac43 100644 
--- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -117,6 +117,8 @@ pub(crate) fn size_and_prefix_iterator( prefix: u8, prefix_mask: usize, ) -> impl Iterator { + let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); + let max_value = 255u8 >> prefix_mask; let l1 = sp_std::cmp::min((max_value as usize).saturating_sub(1), size); let (first_byte, mut rem) = if size == l1 { @@ -163,11 +165,12 @@ fn decode_size( return Ok(result) } result -= 1; - loop { + while result <= trie_constants::NIBBLE_SIZE_BOUND { let n = input.read_byte()? as usize; if n < 255 { return Ok(result + n + 1) } result += 255; } + Ok(trie_constants::NIBBLE_SIZE_BOUND) } diff --git a/primitives/trie/src/recorder.rs b/primitives/trie/src/recorder.rs deleted file mode 100644 index bc67cfc287942..0000000000000 --- a/primitives/trie/src/recorder.rs +++ /dev/null @@ -1,302 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Trie recorder -//! -//! Provides an implementation of the [`TrieRecorder`](trie_db::TrieRecorder) trait. It can be used -//! to record storage accesses to the state to generate a [`StorageProof`]. 
- -use crate::{NodeCodec, StorageProof}; -use codec::Encode; -use hash_db::Hasher; -use parking_lot::Mutex; -use std::{ - collections::HashMap, - marker::PhantomData, - mem, - ops::DerefMut, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; -use trie_db::{RecordedForKey, TrieAccess}; - -const LOG_TARGET: &str = "trie-recorder"; - -/// The internals of [`Recorder`]. -struct RecorderInner { - /// The keys for that we have recorded the trie nodes and if we have recorded up to the value. - recorded_keys: HashMap, RecordedForKey>>, - /// The encoded nodes we accessed while recording. - accessed_nodes: HashMap>, -} - -impl Default for RecorderInner { - fn default() -> Self { - Self { recorded_keys: Default::default(), accessed_nodes: Default::default() } - } -} - -/// The trie recorder. -/// -/// It can be used to record accesses to the trie and then to convert them into a [`StorageProof`]. -pub struct Recorder { - inner: Arc>>, - /// The estimated encoded size of the storage proof this recorder will produce. - /// - /// We store this in an atomic to be able to fetch the value while the `inner` is may locked. - encoded_size_estimation: Arc, -} - -impl Default for Recorder { - fn default() -> Self { - Self { inner: Default::default(), encoded_size_estimation: Arc::new(0.into()) } - } -} - -impl Clone for Recorder { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - encoded_size_estimation: self.encoded_size_estimation.clone(), - } - } -} - -impl Recorder { - /// Returns the recorder as [`TrieRecorder`](trie_db::TrieRecorder) compatible type. - /// - /// - `storage_root`: The storage root of the trie for which accesses are recorded. This is - /// important when recording access to different tries at once (like top and child tries). 
- pub fn as_trie_recorder( - &self, - storage_root: H::Out, - ) -> impl trie_db::TrieRecorder + '_ { - TrieRecorder:: { - inner: self.inner.lock(), - storage_root, - encoded_size_estimation: self.encoded_size_estimation.clone(), - _phantom: PhantomData, - } - } - - /// Drain the recording into a [`StorageProof`]. - /// - /// While a recorder can be cloned, all share the same internal state. After calling this - /// function, all other instances will have their internal state reset as well. - /// - /// If you don't want to drain the recorded state, use [`Self::to_storage_proof`]. - /// - /// Returns the [`StorageProof`]. - pub fn drain_storage_proof(self) -> StorageProof { - let mut recorder = mem::take(&mut *self.inner.lock()); - StorageProof::new(recorder.accessed_nodes.drain().map(|(_, v)| v)) - } - - /// Convert the recording to a [`StorageProof`]. - /// - /// In contrast to [`Self::drain_storage_proof`] this doesn't consumes and doesn't clears the - /// recordings. - /// - /// Returns the [`StorageProof`]. - pub fn to_storage_proof(&self) -> StorageProof { - let recorder = self.inner.lock(); - StorageProof::new(recorder.accessed_nodes.values().cloned()) - } - - /// Returns the estimated encoded size of the proof. - /// - /// The estimation is based on all the nodes that were accessed until now while - /// accessing the trie. - pub fn estimate_encoded_size(&self) -> usize { - self.encoded_size_estimation.load(Ordering::Relaxed) - } - - /// Reset the state. - /// - /// This discards all recorded data. - pub fn reset(&self) { - mem::take(&mut *self.inner.lock()); - self.encoded_size_estimation.store(0, Ordering::Relaxed); - } -} - -/// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. 
-struct TrieRecorder { - inner: I, - storage_root: H::Out, - encoded_size_estimation: Arc, - _phantom: PhantomData, -} - -impl>> trie_db::TrieRecorder - for TrieRecorder -{ - fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { - let mut encoded_size_update = 0; - - match access { - TrieAccess::NodeOwned { hash, node_owned } => { - tracing::trace!( - target: LOG_TARGET, - hash = ?hash, - "Recording node", - ); - - self.inner.accessed_nodes.entry(hash).or_insert_with(|| { - let node = node_owned.to_encoded::>(); - - encoded_size_update += node.encoded_size(); - - node - }); - }, - TrieAccess::EncodedNode { hash, encoded_node } => { - tracing::trace!( - target: LOG_TARGET, - hash = ?hash, - "Recording node", - ); - - self.inner.accessed_nodes.entry(hash).or_insert_with(|| { - let node = encoded_node.into_owned(); - - encoded_size_update += node.encoded_size(); - - node - }); - }, - TrieAccess::Value { hash, value, full_key } => { - tracing::trace!( - target: LOG_TARGET, - hash = ?hash, - key = ?sp_core::hexdisplay::HexDisplay::from(&full_key), - "Recording value", - ); - - self.inner.accessed_nodes.entry(hash).or_insert_with(|| { - let value = value.into_owned(); - - encoded_size_update += value.encoded_size(); - - value - }); - - self.inner - .recorded_keys - .entry(self.storage_root) - .or_default() - .entry(full_key.to_vec()) - .and_modify(|e| *e = RecordedForKey::Value) - .or_insert(RecordedForKey::Value); - }, - TrieAccess::Hash { full_key } => { - tracing::trace!( - target: LOG_TARGET, - key = ?sp_core::hexdisplay::HexDisplay::from(&full_key), - "Recorded hash access for key", - ); - - // We don't need to update the `encoded_size_update` as the hash was already - // accounted for by the recorded node that holds the hash. 
- self.inner - .recorded_keys - .entry(self.storage_root) - .or_default() - .entry(full_key.to_vec()) - .or_insert(RecordedForKey::Hash); - }, - TrieAccess::NonExisting { full_key } => { - tracing::trace!( - target: LOG_TARGET, - key = ?sp_core::hexdisplay::HexDisplay::from(&full_key), - "Recorded non-existing value access for key", - ); - - // Non-existing access means we recorded all trie nodes up to the value. - // Not the actual value, as it doesn't exist, but all trie nodes to know - // that the value doesn't exist in the trie. - self.inner - .recorded_keys - .entry(self.storage_root) - .or_default() - .entry(full_key.to_vec()) - .and_modify(|e| *e = RecordedForKey::Value) - .or_insert(RecordedForKey::Value); - }, - }; - - self.encoded_size_estimation.fetch_add(encoded_size_update, Ordering::Relaxed); - } - - fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { - self.inner - .recorded_keys - .get(&self.storage_root) - .and_then(|k| k.get(key).copied()) - .unwrap_or(RecordedForKey::None) - } -} - -#[cfg(test)] -mod tests { - use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; - - type MemoryDB = crate::MemoryDB; - type Layout = crate::LayoutV1; - type Recorder = super::Recorder; - - const TEST_DATA: &[(&[u8], &[u8])] = - &[(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3"), (b"key4", b"val4")]; - - fn create_trie() -> (MemoryDB, TrieHash) { - let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); - for (k, v) in TEST_DATA { - trie.insert(k, v).expect("Inserts data"); - } - } - - (db, root) - } - - #[test] - fn recorder_works() { - let (db, root) = create_trie(); - - let recorder = Recorder::default(); - - { - let mut trie_recorder = recorder.as_trie_recorder(root); - let trie = TrieDBBuilder::::new(&db, &root) - .with_recorder(&mut trie_recorder) - .build(); - assert_eq!(TEST_DATA[0].1.to_vec(), 
trie.get(TEST_DATA[0].0).unwrap().unwrap()); - } - - let storage_proof = recorder.drain_storage_proof(); - let memory_db: MemoryDB = storage_proof.into_memory_db(); - - // Check that we recorded the required data - let trie = TrieDBBuilder::::new(&memory_db, &root).build(); - assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); - } -} diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 5351e8de6fd82..f6139584dbbad 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -18,11 +18,7 @@ use codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; use scale_info::TypeInfo; -use sp_std::{ - collections::btree_set::BTreeSet, - iter::{DoubleEndedIterator, IntoIterator}, - vec::Vec, -}; +use sp_std::{collections::btree_set::BTreeSet, iter::IntoIterator, vec::Vec}; // Note that `LayoutV1` usage here (proof compaction) is compatible // with `LayoutV0`. use crate::LayoutV1 as Layout; @@ -58,16 +54,10 @@ impl StorageProof { self.trie_nodes.is_empty() } - /// Convert into an iterator over encoded trie nodes in lexicographical order constructed - /// from the proof. - pub fn into_iter_nodes(self) -> impl Sized + DoubleEndedIterator> { - self.trie_nodes.into_iter() - } - /// Create an iterator over encoded trie nodes in lexicographical order constructed /// from the proof. - pub fn iter_nodes(&self) -> impl Sized + DoubleEndedIterator> { - self.trie_nodes.iter() + pub fn iter_nodes(self) -> StorageProofNodeIterator { + StorageProofNodeIterator::new(self) } /// Convert into plain node vector. @@ -80,19 +70,14 @@ impl StorageProof { self.into() } - /// Creates a [`MemoryDB`](crate::MemoryDB) from `Self` reference. - pub fn to_memory_db(&self) -> crate::MemoryDB { - self.into() - } - /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. 
The merged proof output may be smaller than the aggregate size of the /// input proofs due to deduplication of trie nodes. pub fn merge(proofs: impl IntoIterator) -> Self { let trie_nodes = proofs .into_iter() - .flat_map(|proof| proof.into_iter_nodes()) - .collect::>() + .flat_map(|proof| proof.iter_nodes()) + .collect::>() .into_iter() .collect(); @@ -103,18 +88,8 @@ impl StorageProof { pub fn into_compact_proof( self, root: H::Out, - ) -> Result>> { - let db = self.into_memory_db(); - crate::encode_compact::, crate::MemoryDB>(&db, &root) - } - - /// Encode as a compact proof with default trie layout. - pub fn to_compact_proof( - &self, - root: H::Out, - ) -> Result>> { - let db = self.to_memory_db(); - crate::encode_compact::, crate::MemoryDB>(&db, &root) + ) -> Result> { + crate::encode_compact::>(self, root) } /// Returns the estimated encoded size of the compact proof. @@ -131,12 +106,6 @@ impl StorageProof { impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { - From::from(&proof) - } -} - -impl From<&StorageProof> for crate::MemoryDB { - fn from(proof: &StorageProof) -> Self { let mut db = crate::MemoryDB::default(); proof.iter_nodes().for_each(|n| { db.insert(crate::EMPTY_PREFIX, &n); @@ -161,7 +130,7 @@ impl CompactProof { pub fn to_storage_proof( &self, expected_root: Option<&H::Out>, - ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { + ) -> Result<(StorageProof, H::Out), crate::CompactProofError> { let mut db = crate::MemoryDB::::new(&[]); let root = crate::decode_compact::, _, _>( &mut db, @@ -188,8 +157,7 @@ impl CompactProof { pub fn to_memory_db( &self, expected_root: Option<&H::Out>, - ) -> Result<(crate::MemoryDB, H::Out), crate::CompactProofError>> - { + ) -> Result<(crate::MemoryDB, H::Out), crate::CompactProofError> { let mut db = crate::MemoryDB::::new(&[]); let root = crate::decode_compact::, _, _>( &mut db, @@ -200,3 +168,23 @@ impl CompactProof { Ok((db, root)) } } + +/// An iterator over trie nodes 
constructed from a storage proof. The nodes are not guaranteed to +/// be traversed in any particular order. +pub struct StorageProofNodeIterator { + inner: > as IntoIterator>::IntoIter, +} + +impl StorageProofNodeIterator { + fn new(proof: StorageProof) -> Self { + StorageProofNodeIterator { inner: proof.trie_nodes.into_iter() } + } +} + +impl Iterator for StorageProofNodeIterator { + type Item = Vec; + + fn next(&mut self) -> Option { + self.inner.next() + } +} diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index d5ae9a43fb1eb..d29f5a98f31b9 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -20,7 +20,7 @@ //! This uses compact proof from trie crate and extends //! it to substrate specific layout and child trie system. -use crate::{CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX}; +use crate::{CompactProof, HashDBT, StorageProof, TrieConfiguration, TrieHash, EMPTY_PREFIX}; use sp_std::{boxed::Box, vec::Vec}; use trie_db::{CError, Trie}; @@ -78,7 +78,7 @@ where let mut child_tries = Vec::new(); { // fetch child trie roots - let trie = crate::TrieDBBuilder::::new(db, &top_root).build(); + let trie = crate::TrieDB::::new(db, &top_root)?; let mut iter = trie.iter()?; @@ -149,17 +149,17 @@ where /// Then parse all child trie root and compress main trie content first /// then all child trie contents. /// Child trie are ordered by the order of their roots in the top trie. 
-pub fn encode_compact( - partial_db: &DB, - root: &TrieHash, +pub fn encode_compact( + proof: StorageProof, + root: TrieHash, ) -> Result, CError>> where L: TrieConfiguration, - DB: HashDBT + hash_db::HashDBRef, { let mut child_tries = Vec::new(); + let partial_db = proof.into_memory_db(); let mut compact_proof = { - let trie = crate::TrieDBBuilder::::new(partial_db, root).build(); + let trie = crate::TrieDB::::new(&partial_db, &root)?; let mut iter = trie.iter()?; @@ -191,13 +191,13 @@ where }; for child_root in child_tries { - if !HashDBT::::contains(partial_db, &child_root, EMPTY_PREFIX) { + if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). continue } - let trie = crate::TrieDBBuilder::::new(partial_db, &child_root).build(); + let trie = crate::TrieDB::::new(&partial_db, &child_root)?; let child_proof = trie_db::encode_compact::(&trie)?; compact_proof.extend(child_proof); diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 435e6a986722e..ca798db47b552 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -54,7 +54,8 @@ fn branch_node_bit_mask(has_children: impl Iterator) -> (u8, u8) { /// Create a leaf/branch node, encoding a number of nibbles. 
fn fuse_nibbles_node(nibbles: &[u8], kind: NodeKind) -> impl Iterator + '_ { - let size = nibbles.len(); + let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len()); + let iter_start = match kind { NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), NodeKind::BranchNoValue => diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 0dcbbd81fd93f..0ca78940fbbbc 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-serde = { version = "0.4.0", optional = true } -parity-wasm = { version = "0.45", optional = true } +impl-serde = { version = "0.3.1", optional = true } +parity-wasm = { version = "0.42.2", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index d61c74f20222c..ee6ce51efdc02 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } -wasmi = { version = "0.13", optional = true } -wasmtime = { version = "1.0.0", default-features = false, optional = true } +wasmi = { version = "0.9.1", optional = true } +wasmtime = { version = "0.38.0", default-features = false, optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [features] diff --git a/primitives/weights/Cargo.toml b/primitives/weights/Cargo.toml 
deleted file mode 100644 index 8c0302ff5d1b2..0000000000000 --- a/primitives/weights/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "sp-weights" -version = "4.0.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Types and traits for interfacing between the host and the wasm runtime." -documentation = "https://docs.rs/sp-wasm-interface" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", optional = true, features = ["derive"] } -smallvec = "1.8.0" -sp-arithmetic = { version = "5.0.0", default-features = false, path = "../arithmetic" } -sp-core = { version = "6.0.0", default-features = false, path = "../core" } -sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } -sp-std = { version = "4.0.0", default-features = false, path = "../std" } - -[features] -default = [ "std" ] -std = [ - "codec/std", - "scale-info/std", - "serde", - "sp-arithmetic/std", - "sp-core/std", - "sp-debug-derive/std", - "sp-std/std" -] -# By default some types have documentation, `full-metadata-docs` allows to add documentation to -# more types in the metadata. -full-metadata-docs = ["scale-info/docs"] diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs deleted file mode 100644 index af9e730fbfefd..0000000000000 --- a/primitives/weights/src/lib.rs +++ /dev/null @@ -1,322 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Primitives for transaction weighting. -//! -//! Latest machine specification used to benchmark are: -//! - Digital Ocean: ubuntu-s-2vcpu-4gb-ams3-01 -//! - 2x Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz -//! - 4GB RAM -//! - Ubuntu 19.10 (GNU/Linux 5.3.0-18-generic x86_64) -//! - rustc 1.42.0 (b8cedc004 2020-03-09) - -#![cfg_attr(not(feature = "std"), no_std)] - -extern crate self as sp_weights; - -mod weight_meter; -mod weight_v2; - -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use smallvec::SmallVec; -use sp_arithmetic::{ - traits::{BaseArithmetic, SaturatedConversion, Saturating, Unsigned}, - Perbill, -}; -use sp_core::Get; -use sp_debug_derive::RuntimeDebug; - -pub use weight_meter::*; -pub use weight_v2::*; - -pub mod constants { - use super::Weight; - - pub const WEIGHT_PER_SECOND: Weight = Weight::from_ref_time(1_000_000_000_000); - pub const WEIGHT_PER_MILLIS: Weight = Weight::from_ref_time(1_000_000_000); - pub const WEIGHT_PER_MICROS: Weight = Weight::from_ref_time(1_000_000); - pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); -} - -/// The old weight type. -/// -/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other -/// cases. 
-#[derive( - Decode, - Encode, - CompactAs, - PartialEq, - Eq, - Clone, - Copy, - RuntimeDebug, - Default, - MaxEncodedLen, - TypeInfo, -)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(transparent))] -pub struct OldWeight(pub u64); - -/// The weight of database operations that the runtime can invoke. -/// -/// NOTE: This is currently only measured in computational time, and will probably -/// be updated all together once proof size is accounted for. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct RuntimeDbWeight { - pub read: u64, - pub write: u64, -} - -impl RuntimeDbWeight { - pub fn reads(self, r: u64) -> Weight { - Weight::from_ref_time(self.read.saturating_mul(r)) - } - - pub fn writes(self, w: u64) -> Weight { - Weight::from_ref_time(self.write.saturating_mul(w)) - } - - pub fn reads_writes(self, r: u64, w: u64) -> Weight { - let read_weight = self.read.saturating_mul(r); - let write_weight = self.write.saturating_mul(w); - Weight::from_ref_time(read_weight.saturating_add(write_weight)) - } -} - -/// One coefficient and its position in the `WeightToFee`. -/// -/// One term of polynomial is calculated as: -/// -/// ```ignore -/// coeff_integer * x^(degree) + coeff_frac * x^(degree) -/// ``` -/// -/// The `negative` value encodes whether the term is added or subtracted from the -/// overall polynomial result. -#[derive(Clone, Encode, Decode, TypeInfo)] -pub struct WeightToFeeCoefficient { - /// The integral part of the coefficient. - pub coeff_integer: Balance, - /// The fractional part of the coefficient. - pub coeff_frac: Perbill, - /// True iff the coefficient should be interpreted as negative. - pub negative: bool, - /// Degree/exponent of the term. - pub degree: u8, -} - -/// A list of coefficients that represent one polynomial. 
-pub type WeightToFeeCoefficients = SmallVec<[WeightToFeeCoefficient; 4]>; - -/// A trait that describes the weight to fee calculation. -pub trait WeightToFee { - /// The type that is returned as result from calculation. - type Balance: BaseArithmetic + From + Copy + Unsigned; - - /// Calculates the fee from the passed `weight`. - fn weight_to_fee(weight: &Weight) -> Self::Balance; -} - -/// A trait that describes the weight to fee calculation as polynomial. -/// -/// An implementor should only implement the `polynomial` function. -pub trait WeightToFeePolynomial { - /// The type that is returned as result from polynomial evaluation. - type Balance: BaseArithmetic + From + Copy + Unsigned; - - /// Returns a polynomial that describes the weight to fee conversion. - /// - /// This is the only function that should be manually implemented. Please note - /// that all calculation is done in the probably unsigned `Balance` type. This means - /// that the order of coefficients is important as putting the negative coefficients - /// first will most likely saturate the result to zero mid evaluation. - fn polynomial() -> WeightToFeeCoefficients; -} - -impl WeightToFee for T -where - T: WeightToFeePolynomial, -{ - type Balance = ::Balance; - - /// Calculates the fee from the passed `weight` according to the `polynomial`. - /// - /// This should not be overridden in most circumstances. Calculation is done in the - /// `Balance` type and never overflows. All evaluation is saturating. - fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::polynomial() - .iter() - .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(weight.ref_time()) - .saturating_pow(args.degree.into()); - - // The sum could get negative. Therefore we only sum with the accumulator. - // The Perbill Mul implementation is non overflowing. 
- let frac = args.coeff_frac * w; - let integer = args.coeff_integer.saturating_mul(w); - - if args.negative { - acc = acc.saturating_sub(frac); - acc = acc.saturating_sub(integer); - } else { - acc = acc.saturating_add(frac); - acc = acc.saturating_add(integer); - } - - acc - }) - } -} - -/// Implementor of `WeightToFee` that maps one unit of weight to one unit of fee. -pub struct IdentityFee(sp_std::marker::PhantomData); - -impl WeightToFee for IdentityFee -where - T: BaseArithmetic + From + Copy + Unsigned, -{ - type Balance = T; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()) - } -} - -/// Implementor of [`WeightToFee`] that uses a constant multiplier. -/// # Example -/// -/// ``` -/// # use sp_core::ConstU128; -/// # use sp_weights::ConstantMultiplier; -/// // Results in a multiplier of 10 for each unit of weight (or length) -/// type LengthToFee = ConstantMultiplier::>; -/// ``` -pub struct ConstantMultiplier(sp_std::marker::PhantomData<(T, M)>); - -impl WeightToFee for ConstantMultiplier -where - T: BaseArithmetic + From + Copy + Unsigned, - M: Get, -{ - type Balance = T; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()).saturating_mul(M::get()) - } -} - -#[cfg(test)] -#[allow(dead_code)] -mod tests { - use super::*; - use smallvec::smallvec; - - type Balance = u64; - - // 0.5x^3 + 2.333x^2 + 7x - 10_000 - struct Poly; - impl WeightToFeePolynomial for Poly { - type Balance = Balance; - - fn polynomial() -> WeightToFeeCoefficients { - smallvec![ - WeightToFeeCoefficient { - coeff_integer: 0, - coeff_frac: Perbill::from_float(0.5), - negative: false, - degree: 3 - }, - WeightToFeeCoefficient { - coeff_integer: 2, - coeff_frac: Perbill::from_rational(1u32, 3u32), - negative: false, - degree: 2 - }, - WeightToFeeCoefficient { - coeff_integer: 7, - coeff_frac: Perbill::zero(), - negative: false, - degree: 1 - }, - WeightToFeeCoefficient { - 
coeff_integer: 10_000, - coeff_frac: Perbill::zero(), - negative: true, - degree: 0 - }, - ] - } - } - - #[test] - fn polynomial_works() { - // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 - assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(100)), 514033); - // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 - assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10_123)), 518917034928); - } - - #[test] - fn polynomial_does_not_underflow() { - assert_eq!(Poly::weight_to_fee(&Weight::zero()), 0); - assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10)), 0); - } - - #[test] - fn polynomial_does_not_overflow() { - assert_eq!(Poly::weight_to_fee(&Weight::MAX), Balance::max_value() - 10_000); - } - - #[test] - fn identity_fee_works() { - assert_eq!(IdentityFee::::weight_to_fee(&Weight::zero()), 0); - assert_eq!(IdentityFee::::weight_to_fee(&Weight::from_ref_time(50)), 50); - assert_eq!(IdentityFee::::weight_to_fee(&Weight::MAX), Balance::max_value()); - } - - #[test] - fn constant_fee_works() { - use sp_core::ConstU128; - assert_eq!( - ConstantMultiplier::>::weight_to_fee(&Weight::zero()), - 0 - ); - assert_eq!( - ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( - 50 - )), - 500 - ); - assert_eq!( - ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( - 16 - )), - 16384 - ); - assert_eq!( - ConstantMultiplier::>::weight_to_fee( - &Weight::from_ref_time(2) - ), - u128::MAX - ); - } -} diff --git a/primitives/weights/src/weight_meter.rs b/primitives/weights/src/weight_meter.rs deleted file mode 100644 index d03e72968bb09..0000000000000 --- a/primitives/weights/src/weight_meter.rs +++ /dev/null @@ -1,176 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Contains the `WeightMeter` primitive to meter weight usage. - -use super::Weight; - -use sp_arithmetic::Perbill; - -/// Meters consumed weight and a hard limit for the maximal consumable weight. -/// -/// Can be used to check if enough weight for an operation is available before committing to it. -/// -/// # Example -/// -/// ```rust -/// use sp_weights::{Weight, WeightMeter}; -/// -/// // The weight is limited to (10, 0). -/// let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 0)); -/// // There is enough weight remaining for an operation with (5, 0) weight. -/// assert!(meter.check_accrue(Weight::from_parts(5, 0))); -/// // There is not enough weight remaining for an operation with (6, 0) weight. -/// assert!(!meter.check_accrue(Weight::from_parts(6, 0))); -/// ``` -#[derive(Debug, Clone)] -pub struct WeightMeter { - /// The already consumed weight. - pub consumed: Weight, - - /// The maximal consumable weight. - pub limit: Weight, -} - -impl WeightMeter { - /// Creates [`Self`] from a limit for the maximal consumable weight. - pub fn from_limit(limit: Weight) -> Self { - Self { consumed: Weight::zero(), limit } - } - - /// Creates [`Self`] with the maximal possible limit for the consumable weight. - pub fn max_limit() -> Self { - Self::from_limit(Weight::MAX) - } - - /// The remaining weight that can still be consumed. - pub fn remaining(&self) -> Weight { - self.limit.saturating_sub(self.consumed) - } - - /// The ratio of consumed weight to the limit. - /// - /// Calculates one ratio per component and returns the largest. 
- pub fn consumed_ratio(&self) -> Perbill { - let time = Perbill::from_rational(self.consumed.ref_time(), self.limit.ref_time()); - let pov = Perbill::from_rational(self.consumed.proof_size(), self.limit.proof_size()); - time.max(pov) - } - - /// Consume the given weight after checking that it can be consumed. Otherwise do nothing. - pub fn check_accrue(&mut self, w: Weight) -> bool { - self.consumed.checked_add(&w).map_or(false, |test| { - if test.any_gt(self.limit) { - false - } else { - self.consumed = test; - true - } - }) - } - - /// Check if the given weight can be consumed. - pub fn can_accrue(&self, w: Weight) -> bool { - self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit)) - } -} - -#[cfg(test)] -mod tests { - use crate::*; - - #[test] - fn weight_meter_remaining_works() { - let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 20)); - - assert!(meter.check_accrue(Weight::from_parts(5, 0))); - assert_eq!(meter.consumed, Weight::from_parts(5, 0)); - assert_eq!(meter.remaining(), Weight::from_parts(5, 20)); - - assert!(meter.check_accrue(Weight::from_parts(2, 10))); - assert_eq!(meter.consumed, Weight::from_parts(7, 10)); - assert_eq!(meter.remaining(), Weight::from_parts(3, 10)); - - assert!(meter.check_accrue(Weight::from_parts(3, 10))); - assert_eq!(meter.consumed, Weight::from_parts(10, 20)); - assert_eq!(meter.remaining(), Weight::from_parts(0, 0)); - } - - #[test] - fn weight_meter_can_accrue_works() { - let meter = WeightMeter::from_limit(Weight::from_parts(1, 1)); - - assert!(meter.can_accrue(Weight::from_parts(0, 0))); - assert!(meter.can_accrue(Weight::from_parts(1, 1))); - assert!(!meter.can_accrue(Weight::from_parts(0, 2))); - assert!(!meter.can_accrue(Weight::from_parts(2, 0))); - assert!(!meter.can_accrue(Weight::from_parts(2, 2))); - } - - #[test] - fn weight_meter_check_accrue_works() { - let mut meter = WeightMeter::from_limit(Weight::from_parts(2, 2)); - - assert!(meter.check_accrue(Weight::from_parts(0, 0))); - 
assert!(meter.check_accrue(Weight::from_parts(1, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 2))); - assert!(!meter.check_accrue(Weight::from_parts(2, 0))); - assert!(!meter.check_accrue(Weight::from_parts(2, 2))); - assert!(meter.check_accrue(Weight::from_parts(0, 1))); - assert!(meter.check_accrue(Weight::from_parts(1, 0))); - } - - #[test] - fn weight_meter_check_and_can_accrue_works() { - let mut meter = WeightMeter::max_limit(); - - assert!(meter.can_accrue(Weight::from_parts(u64::MAX, 0))); - assert!(meter.check_accrue(Weight::from_parts(u64::MAX, 0))); - - assert!(meter.can_accrue(Weight::from_parts(0, u64::MAX))); - assert!(meter.check_accrue(Weight::from_parts(0, u64::MAX))); - - assert!(!meter.can_accrue(Weight::from_parts(0, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 1))); - - assert!(!meter.can_accrue(Weight::from_parts(1, 0))); - assert!(!meter.check_accrue(Weight::from_parts(1, 0))); - - assert!(meter.can_accrue(Weight::zero())); - assert!(meter.check_accrue(Weight::zero())); - } - - #[test] - fn consumed_ratio_works() { - let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 20)); - - assert!(meter.check_accrue(Weight::from_parts(5, 0))); - assert_eq!(meter.consumed_ratio(), Perbill::from_percent(50)); - assert!(meter.check_accrue(Weight::from_parts(0, 12))); - assert_eq!(meter.consumed_ratio(), Perbill::from_percent(60)); - - assert!(meter.check_accrue(Weight::from_parts(2, 0))); - assert_eq!(meter.consumed_ratio(), Perbill::from_percent(70)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); - assert_eq!(meter.consumed_ratio(), Perbill::from_percent(80)); - - assert!(meter.check_accrue(Weight::from_parts(3, 0))); - assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); - assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); - } -} diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs deleted file 
mode 100644 index 2933d80099dd7..0000000000000 --- a/primitives/weights/src/weight_v2.rs +++ /dev/null @@ -1,462 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use codec::{Decode, Encode, MaxEncodedLen}; -use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; -use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; -use sp_debug_derive::RuntimeDebug; - -use super::*; - -#[derive( - Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, RuntimeDebug, Default, -)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct Weight { - #[codec(compact)] - /// The weight of computational time used based on some reference hardware. - ref_time: u64, - #[codec(compact)] - /// The weight of storage space used by proof of validity. - proof_size: u64, -} - -impl From for Weight { - fn from(old: OldWeight) -> Self { - Weight::from_ref_time(old.0) - } -} - -impl Weight { - /// Set the reference time part of the weight. - pub const fn set_ref_time(mut self, c: u64) -> Self { - self.ref_time = c; - self - } - - /// Set the storage size part of the weight. - pub const fn set_proof_size(mut self, c: u64) -> Self { - self.proof_size = c; - self - } - - /// Return the reference time part of the weight. 
- pub const fn ref_time(&self) -> u64 { - self.ref_time - } - - /// Return the storage size part of the weight. - pub const fn proof_size(&self) -> u64 { - self.proof_size - } - - /// Return a mutable reference to the reference time part of the weight. - pub fn ref_time_mut(&mut self) -> &mut u64 { - &mut self.ref_time - } - - /// Return a mutable reference to the storage size part of the weight. - pub fn proof_size_mut(&mut self) -> &mut u64 { - &mut self.proof_size - } - - pub const MAX: Self = Self { ref_time: u64::MAX, proof_size: u64::MAX }; - - /// Get the conservative min of `self` and `other` weight. - pub fn min(&self, other: Self) -> Self { - Self { - ref_time: self.ref_time.min(other.ref_time), - proof_size: self.proof_size.min(other.proof_size), - } - } - - /// Get the aggressive max of `self` and `other` weight. - pub fn max(&self, other: Self) -> Self { - Self { - ref_time: self.ref_time.max(other.ref_time), - proof_size: self.proof_size.max(other.proof_size), - } - } - - /// Try to add some `other` weight while upholding the `limit`. - pub fn try_add(&self, other: &Self, limit: &Self) -> Option { - let total = self.checked_add(other)?; - if total.any_gt(*limit) { - None - } else { - Some(total) - } - } - - /// Construct [`Weight`] with reference time weight and 0 storage size weight. - pub const fn from_ref_time(ref_time: u64) -> Self { - Self { ref_time, proof_size: 0 } - } - - /// Construct [`Weight`] with storage size weight and 0 reference time weight. - pub const fn from_proof_size(proof_size: u64) -> Self { - Self { ref_time: 0, proof_size } - } - - /// Construct [`Weight`] from weight parts, namely reference time and proof size weights. - pub const fn from_parts(ref_time: u64, proof_size: u64) -> Self { - Self { ref_time, proof_size } - } - - /// Saturating [`Weight`] addition. Computes `self + rhs`, saturating at the numeric bounds of - /// all fields instead of overflowing. 
- pub const fn saturating_add(self, rhs: Self) -> Self { - Self { - ref_time: self.ref_time.saturating_add(rhs.ref_time), - proof_size: self.proof_size.saturating_add(rhs.proof_size), - } - } - - /// Saturating [`Weight`] subtraction. Computes `self - rhs`, saturating at the numeric bounds - /// of all fields instead of overflowing. - pub const fn saturating_sub(self, rhs: Self) -> Self { - Self { - ref_time: self.ref_time.saturating_sub(rhs.ref_time), - proof_size: self.proof_size.saturating_sub(rhs.proof_size), - } - } - - /// Saturating [`Weight`] scalar multiplication. Computes `self.field * scalar` for all fields, - /// saturating at the numeric bounds of all fields instead of overflowing. - pub const fn saturating_mul(self, scalar: u64) -> Self { - Self { - ref_time: self.ref_time.saturating_mul(scalar), - proof_size: self.proof_size.saturating_mul(scalar), - } - } - - /// Saturating [`Weight`] scalar division. Computes `self.field / scalar` for all fields, - /// saturating at the numeric bounds of all fields instead of overflowing. - pub const fn saturating_div(self, scalar: u64) -> Self { - Self { - ref_time: self.ref_time.saturating_div(scalar), - proof_size: self.proof_size.saturating_div(scalar), - } - } - - /// Saturating [`Weight`] scalar exponentiation. Computes `self.field.pow(exp)` for all fields, - /// saturating at the numeric bounds of all fields instead of overflowing. - pub const fn saturating_pow(self, exp: u32) -> Self { - Self { - ref_time: self.ref_time.saturating_pow(exp), - proof_size: self.proof_size.saturating_pow(exp), - } - } - - /// Increment [`Weight`] by `amount` via saturating addition. - pub fn saturating_accrue(&mut self, amount: Self) { - *self = self.saturating_add(amount); - } - - /// Checked [`Weight`] addition. Computes `self + rhs`, returning `None` if overflow occurred. 
- pub const fn checked_add(&self, rhs: &Self) -> Option { - let ref_time = match self.ref_time.checked_add(rhs.ref_time) { - Some(t) => t, - None => return None, - }; - let proof_size = match self.proof_size.checked_add(rhs.proof_size) { - Some(s) => s, - None => return None, - }; - Some(Self { ref_time, proof_size }) - } - - /// Checked [`Weight`] subtraction. Computes `self - rhs`, returning `None` if overflow - /// occurred. - pub const fn checked_sub(&self, rhs: &Self) -> Option { - let ref_time = match self.ref_time.checked_sub(rhs.ref_time) { - Some(t) => t, - None => return None, - }; - let proof_size = match self.proof_size.checked_sub(rhs.proof_size) { - Some(s) => s, - None => return None, - }; - Some(Self { ref_time, proof_size }) - } - - /// Checked [`Weight`] scalar multiplication. Computes `self.field * scalar` for each field, - /// returning `None` if overflow occurred. - pub const fn checked_mul(self, scalar: u64) -> Option { - let ref_time = match self.ref_time.checked_mul(scalar) { - Some(t) => t, - None => return None, - }; - let proof_size = match self.proof_size.checked_mul(scalar) { - Some(s) => s, - None => return None, - }; - Some(Self { ref_time, proof_size }) - } - - /// Checked [`Weight`] scalar division. Computes `self.field / scalar` for each field, returning - /// `None` if overflow occurred. - pub const fn checked_div(self, scalar: u64) -> Option { - let ref_time = match self.ref_time.checked_div(scalar) { - Some(t) => t, - None => return None, - }; - let proof_size = match self.proof_size.checked_div(scalar) { - Some(s) => s, - None => return None, - }; - Some(Self { ref_time, proof_size }) - } - - /// Return a [`Weight`] where all fields are zero. - pub const fn zero() -> Self { - Self { ref_time: 0, proof_size: 0 } - } - - /// Constant version of Add with u64. - /// - /// Is only overflow safe when evaluated at compile-time. 
- pub const fn add(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time + scalar, proof_size: self.proof_size + scalar } - } - - /// Constant version of Sub with u64. - /// - /// Is only overflow safe when evaluated at compile-time. - pub const fn sub(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time - scalar, proof_size: self.proof_size - scalar } - } - - /// Constant version of Div with u64. - /// - /// Is only overflow safe when evaluated at compile-time. - pub const fn div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time / scalar, proof_size: self.proof_size / scalar } - } - - /// Constant version of Mul with u64. - /// - /// Is only overflow safe when evaluated at compile-time. - pub const fn mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time * scalar, proof_size: self.proof_size * scalar } - } - - /// Returns true if any of `self`'s constituent weights is strictly greater than that of the - /// `other`'s, otherwise returns false. - pub const fn any_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time || self.proof_size > other.proof_size - } - - /// Returns true if all of `self`'s constituent weights is strictly greater than that of the - /// `other`'s, otherwise returns false. - pub const fn all_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time && self.proof_size > other.proof_size - } - - /// Returns true if any of `self`'s constituent weights is strictly less than that of the - /// `other`'s, otherwise returns false. - pub const fn any_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time || self.proof_size < other.proof_size - } - - /// Returns true if all of `self`'s constituent weights is strictly less than that of the - /// `other`'s, otherwise returns false. 
- pub const fn all_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time && self.proof_size < other.proof_size - } - - /// Returns true if any of `self`'s constituent weights is greater than or equal to that of the - /// `other`'s, otherwise returns false. - pub const fn any_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time || self.proof_size >= other.proof_size - } - - /// Returns true if all of `self`'s constituent weights is greater than or equal to that of the - /// `other`'s, otherwise returns false. - pub const fn all_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time && self.proof_size >= other.proof_size - } - - /// Returns true if any of `self`'s constituent weights is less than or equal to that of the - /// `other`'s, otherwise returns false. - pub const fn any_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time || self.proof_size <= other.proof_size - } - - /// Returns true if all of `self`'s constituent weights is less than or equal to that of the - /// `other`'s, otherwise returns false. - pub const fn all_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time && self.proof_size <= other.proof_size - } - - /// Returns true if any of `self`'s constituent weights is equal to that of the `other`'s, - /// otherwise returns false. - pub const fn any_eq(self, other: Self) -> bool { - self.ref_time == other.ref_time || self.proof_size == other.proof_size - } - - // NOTE: `all_eq` does not exist, as it's simply the `eq` method from the `PartialEq` trait. 
-} - -impl Zero for Weight { - fn zero() -> Self { - Self::zero() - } - - fn is_zero(&self) -> bool { - self == &Self::zero() - } -} - -impl Add for Weight { - type Output = Self; - fn add(self, rhs: Self) -> Self { - Self { - ref_time: self.ref_time + rhs.ref_time, - proof_size: self.proof_size + rhs.proof_size, - } - } -} - -impl Sub for Weight { - type Output = Self; - fn sub(self, rhs: Self) -> Self { - Self { - ref_time: self.ref_time - rhs.ref_time, - proof_size: self.proof_size - rhs.proof_size, - } - } -} - -impl Mul for Weight -where - T: Mul + Copy, -{ - type Output = Self; - fn mul(self, b: T) -> Self { - Self { ref_time: b * self.ref_time, proof_size: b * self.proof_size } - } -} - -macro_rules! weight_mul_per_impl { - ($($t:ty),* $(,)?) => { - $( - impl Mul for $t { - type Output = Weight; - fn mul(self, b: Weight) -> Weight { - Weight { - ref_time: self * b.ref_time, - proof_size: self * b.proof_size, - } - } - } - )* - } -} -weight_mul_per_impl!( - sp_arithmetic::Percent, - sp_arithmetic::PerU16, - sp_arithmetic::Permill, - sp_arithmetic::Perbill, - sp_arithmetic::Perquintill, -); - -macro_rules! weight_mul_primitive_impl { - ($($t:ty),* $(,)?) 
=> { - $( - impl Mul for $t { - type Output = Weight; - fn mul(self, b: Weight) -> Weight { - Weight { - ref_time: u64::from(self) * b.ref_time, - proof_size: u64::from(self) * b.proof_size, - } - } - } - )* - } -} -weight_mul_primitive_impl!(u8, u16, u32, u64); - -impl Div for Weight -where - u64: Div, - T: Copy, -{ - type Output = Self; - fn div(self, b: T) -> Self { - Self { ref_time: self.ref_time / b, proof_size: self.proof_size / b } - } -} - -impl CheckedAdd for Weight { - fn checked_add(&self, rhs: &Self) -> Option { - self.checked_add(rhs) - } -} - -impl CheckedSub for Weight { - fn checked_sub(&self, rhs: &Self) -> Option { - self.checked_sub(rhs) - } -} - -impl core::fmt::Display for Weight { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Weight(ref_time: {}, proof_size: {})", self.ref_time, self.proof_size) - } -} - -impl Bounded for Weight { - fn min_value() -> Self { - Zero::zero() - } - fn max_value() -> Self { - Self::MAX - } -} - -impl AddAssign for Weight { - fn add_assign(&mut self, other: Self) { - *self = Self { - ref_time: self.ref_time + other.ref_time, - proof_size: self.proof_size + other.proof_size, - }; - } -} - -impl SubAssign for Weight { - fn sub_assign(&mut self, other: Self) { - *self = Self { - ref_time: self.ref_time - other.ref_time, - proof_size: self.proof_size - other.proof_size, - }; - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn is_zero_works() { - assert!(Weight::zero().is_zero()); - assert!(!Weight::from_parts(1, 0).is_zero()); - assert!(!Weight::from_parts(0, 1).is_zero()); - assert!(!Weight::MAX.is_zero()); - } -} diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 482d54b50f73d..eedd2ee0bb409 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -62,7 +62,7 @@ build-linux-substrate: - !reference [.rusty-cachier, before_script] script: - rusty-cachier snapshot create - - 
WASM_BUILD_NO_COLOR=1 time cargo build --locked --release --verbose + - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose - mv $CARGO_TARGET_DIR/release/substrate ./artifacts/substrate/. - echo -n "Substrate version = " - if [ "${CI_COMMIT_TAG}" ]; then @@ -95,7 +95,7 @@ build-linux-substrate: script: - rusty-cachier snapshot create - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo build --locked --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --release --verbose - cd - - mv $CARGO_TARGET_DIR/release/subkey ./artifacts/subkey/. - echo -n "Subkey version = " @@ -117,7 +117,7 @@ build-subkey-macos: - mkdir -p ./artifacts/subkey script: - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo build --locked --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --release --verbose - cd - - mv ./target/release/subkey ./artifacts/subkey/. - echo -n "Subkey version = " @@ -149,7 +149,7 @@ build-rustdoc: - ./crate-docs/ script: - rusty-cachier snapshot create - - time cargo +nightly doc --locked --workspace --all-features --verbose --no-deps + - time cargo +nightly doc --workspace --all-features --verbose --no-deps - rm -f $CARGO_TARGET_DIR/doc/.lock - mv $CARGO_TARGET_DIR/doc ./crate-docs # FIXME: remove me after CI image gets nonroot diff --git a/scripts/ci/gitlab/pipeline/check.yml b/scripts/ci/gitlab/pipeline/check.yml index 878c46f32e850..3166e13313f2a 100644 --- a/scripts/ci/gitlab/pipeline/check.yml +++ b/scripts/ci/gitlab/pipeline/check.yml @@ -35,19 +35,6 @@ test-dependency-rules: script: - ./scripts/ci/gitlab/ensure-deps.sh -test-rust-features: - stage: check - extends: - - .kubernetes-env - - .test-refs-no-trigger-prs-only - allow_failure: true - script: - - git clone - --depth=1 - --branch="$PIPELINE_SCRIPTS_TAG" - https://github.com/paritytech/pipeline-scripts - - bash ./pipeline-scripts/rust-features.sh . 
- test-prometheus-alerting-rules: stage: check extends: .kubernetes-env @@ -64,3 +51,4 @@ test-prometheus-alerting-rules: - promtool check rules ./scripts/ci/monitoring/alerting-rules/alerting-rules.yaml - cat ./scripts/ci/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules ./scripts/ci/monitoring/alerting-rules/alerting-rule-tests.yaml + diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 4f523738b151c..ccf8338236d0a 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -59,21 +59,17 @@ cargo-check-benches: CI_JOB_NAME: "cargo-check-benches" extends: - .docker-env - - .test-refs-check-benches + - .test-refs - .collect-artifacts - - .pipeline-stopper-artifacts before_script: # perform rusty-cachier operations before any further modifications to the git repo to make cargo feel cheated not so much - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] - - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs - - | - export BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech/substrate/pulls/${$CI_COMMIT_REF_NAME} | jq .base.ref) - if [ $CI_COMMIT_REF_NAME != "master" ]; then - git fetch origin +${BASE}:${BASE}; + git fetch origin +master:master; git fetch origin +$CI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME; - git checkout ${BASE}; + git checkout master; git config user.email "ci@gitlab.parity.io"; git merge $CI_COMMIT_REF_NAME --verbose --no-edit; fi @@ -85,14 +81,14 @@ cargo-check-benches: - echo "___Running benchmarks___"; - case ${CI_NODE_INDEX} in 1) - SKIP_WASM_BUILD=1 time cargo +nightly check --locked --benches --all; - cargo run --locked --release -p node-bench -- ::trie::read::small --json + SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all; + cargo run --release -p node-bench -- ::trie::read::small --json | tee 
./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; echo "___Uploading cache for rusty-cachier___"; rusty-cachier cache upload ;; 2) - cargo run --locked --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json + cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json ;; esac @@ -112,8 +108,6 @@ node-bench-regression-guard: - job: cargo-check-benches artifacts: true # polls artifact from master to compare with current result - # need to specify both parallel jobs from master because of the bug - # https://gitlab.com/gitlab-org/gitlab/-/issues/39063 - project: $CI_PROJECT_PATH job: "cargo-check-benches 1/2" ref: master @@ -131,18 +125,16 @@ node-bench-regression-guard: - echo "In case of this job failure, check your pipeline's cargo-check-benches" - 'node-bench-regression-guard --reference artifacts/benches/master-* --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' - after_script: [""] cargo-check-subkey: stage: test extends: - .docker-env - .test-refs - - .pipeline-stopper-artifacts script: - rusty-cachier snapshot create - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo check --locked --release + - SKIP_WASM_BUILD=1 time cargo check --release - rusty-cachier cache upload cargo-check-try-runtime: @@ -156,7 +148,7 @@ cargo-check-try-runtime: - .test-refs script: - rusty-cachier snapshot create - - time cargo check --locked --features try-runtime + - time cargo check --features try-runtime - rusty-cachier cache upload cargo-check-wasmer-sandbox: @@ -170,7 +162,7 @@ cargo-check-wasmer-sandbox: - .test-refs script: - rusty-cachier snapshot create - - time cargo check --locked --features wasmer-sandbox + - time cargo check --features wasmer-sandbox - 
rusty-cachier cache upload test-deterministic-wasm: @@ -189,13 +181,13 @@ test-deterministic-wasm: script: - rusty-cachier snapshot create # build runtime - - cargo build --locked --verbose --release -p kitchensink-runtime + - cargo build --verbose --release -p kitchensink-runtime # make checksum - sha256sum $CARGO_TARGET_DIR/release/wbuild/kitchensink-runtime/target/wasm32-unknown-unknown/release/kitchensink_runtime.wasm > checksum.sha256 # clean up - rm -rf $CARGO_TARGET_DIR/release/wbuild # build again - - cargo build --locked --verbose --release -p kitchensink-runtime + - cargo build --verbose --release -p kitchensink-runtime # confirm checksum - sha256sum -c ./checksum.sha256 # clean up again, don't put release binaries into the cache @@ -207,7 +199,6 @@ test-linux-stable: extends: - .docker-env - .test-refs - - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -253,8 +244,7 @@ test-frame-support: RUN_UI_TESTS: 1 script: - rusty-cachier snapshot create - - time cargo test --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - - time cargo test --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec + - time cargo test -p frame-support-test --features=conditional-storage,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - rusty-cachier cache upload @@ -283,24 +273,6 @@ test-linux-stable-extra: - time cargo test --doc --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path 
./bin/node/cli/Cargo.toml - rusty-cachier cache upload -# This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors. -quick-benchmarks: - stage: test - extends: - - .docker-env - - .test-refs - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - RUST_BACKTRACE: "full" - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - script: - - rusty-cachier snapshot create - - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 - - rusty-cachier cache upload - test-frame-examples-compile-to-wasm: # into one job stage: test @@ -317,9 +289,9 @@ test-frame-examples-compile-to-wasm: script: - rusty-cachier snapshot create - cd ./frame/examples/offchain-worker/ - - cargo +nightly build --locked --target=wasm32-unknown-unknown --no-default-features + - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features - cd ../basic - - cargo +nightly build --locked --target=wasm32-unknown-unknown --no-default-features + - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features - rusty-cachier cache upload test-linux-stable-int: @@ -327,7 +299,6 @@ test-linux-stable-int: extends: - .docker-env - .test-refs - - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. 
@@ -357,12 +328,11 @@ check-tracing: extends: - .docker-env - .test-refs - - .pipeline-stopper-artifacts script: - rusty-cachier snapshot create # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - - time cargo +nightly test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features - - time cargo +nightly test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing + - time cargo +nightly test --manifest-path ./primitives/tracing/Cargo.toml --no-default-features + - time cargo +nightly test --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing - rusty-cachier cache upload # more information about this job can be found here: @@ -386,9 +356,9 @@ test-full-crypto-feature: script: - rusty-cachier snapshot create - cd primitives/core/ - - time cargo +nightly build --locked --verbose --no-default-features --features full_crypto + - time cargo +nightly build --verbose --no-default-features --features full_crypto - cd ../application-crypto - - time cargo +nightly build --locked --verbose --no-default-features --features full_crypto + - time cargo +nightly build --verbose --no-default-features --features full_crypto - rusty-cachier cache upload test-wasmer-sandbox: @@ -397,16 +367,12 @@ test-wasmer-sandbox: - .docker-env - .test-refs-wasmer-sandbox variables: - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - RUST_BACKTRACE: 1 - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" CI_JOB_NAME: "test-wasmer-sandbox" parallel: 3 script: - rusty-cachier snapshot create - echo "Node index - ${CI_NODE_INDEX}. 
Total amount - ${CI_NODE_TOTAL}" - - time cargo nextest run --locked --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} + - time cargo nextest run --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi cargo-check-macos: @@ -415,7 +381,7 @@ cargo-check-macos: before_script: - !reference [.rust-info-script, script] script: - - SKIP_WASM_BUILD=1 time cargo check --locked --release + - SKIP_WASM_BUILD=1 time cargo check --release tags: - osx @@ -431,5 +397,5 @@ check-rustdoc: RUSTDOCFLAGS: "-Dwarnings" script: - rusty-cachier snapshot create - - time cargo +nightly doc --locked --workspace --all-features --verbose --no-deps + - time cargo +nightly doc --workspace --all-features --verbose --no-deps - rusty-cachier cache upload diff --git a/scripts/ci/node-template-release/Cargo.toml b/scripts/ci/node-template-release/Cargo.toml index 0800b17536453..8871a04e19b16 100644 --- a/scripts/ci/node-template-release/Cargo.toml +++ b/scripts/ci/node-template-release/Cargo.toml @@ -10,7 +10,7 @@ homepage = "https://substrate.io" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.0", features = ["derive"] } flate2 = "1.0" fs_extra = "1" git2 = "0.8" diff --git a/scripts/ci/node-template-release/src/main.rs b/scripts/ci/node-template-release/src/main.rs index 91a7e865458cf..62e9b66715768 100644 --- a/scripts/ci/node-template-release/src/main.rs +++ b/scripts/ci/node-template-release/src/main.rs @@ -29,10 +29,10 @@ type CargoToml = HashMap; #[derive(Parser)] struct Options { /// The path to the `node-template` source. - #[arg()] + #[clap(parse(from_os_str))] node_template: PathBuf, /// The path where to output the generated `tar.gz` file. 
- #[arg()] + #[clap(parse(from_os_str))] output: PathBuf, } diff --git a/scripts/run_all_benchmarks.sh b/scripts/run_all_benchmarks.sh index dd5d2e182baf2..9aac58be45029 100755 --- a/scripts/run_all_benchmarks.sh +++ b/scripts/run_all_benchmarks.sh @@ -121,7 +121,6 @@ for PALLET in "${PALLETS[@]}"; do --wasm-execution=compiled \ --heap-pages=4096 \ --output="$WEIGHT_FILE" \ - --header="./HEADER-APACHE2" \ --template=./.maintain/frame-weight-template.hbs 2>&1 ) if [ $? -ne 0 ]; then @@ -138,7 +137,6 @@ OUTPUT=$( --execution=wasm \ --wasm-execution=compiled \ --weight-path="./frame/support/src/weights/" \ - --header="./HEADER-APACHE2" \ --warmup=10 \ --repeat=100 2>&1 ) diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index fcac37441ba98..ce5ef2ffcc01a 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" -async-trait = "0.1.57" +async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" +hex = "0.4" serde = "1.0.136" -serde_json = "1.0.85" +serde_json = "1.0.79" sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, features = [ "test-helpers", diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index 881c50d434264..f2b99a5b355f0 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -22,14 +22,14 @@ use sc_client_api::{backend::Finalizer, client::BlockBackend}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_service::client::Client; use sp_consensus::{BlockOrigin, Error as ConsensusError}; -use sp_runtime::{traits::Block as BlockT, Justification, Justifications}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification, Justifications}; /// Extension trait for a 
test client. pub trait ClientExt: Sized { /// Finalize a block. fn finalize_block( &self, - hash: Block::Hash, + id: BlockId, justification: Option, ) -> sp_blockchain::Result<()>; @@ -75,10 +75,10 @@ where { fn finalize_block( &self, - hash: Block::Hash, + id: BlockId, justification: Option, ) -> sp_blockchain::Result<()> { - Finalizer::finalize_block(self, hash, justification, true) + Finalizer::finalize_block(self, id, justification, true) } fn genesis_hash(&self) -> ::Hash { diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index d3e71f0ad28d6..148f34246044d 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -26,7 +26,7 @@ pub use sc_client_api::{ execution_extensions::{ExecutionExtensions, ExecutionStrategies}, BadBlocks, ForkBlocks, }; -pub use sc_client_db::{self, Backend, BlocksPruning}; +pub use sc_client_db::{self, Backend}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; @@ -95,15 +95,14 @@ impl } /// Create new `TestClientBuilder` with default backend and pruning window size - pub fn with_pruning_window(blocks_pruning: u32) -> Self { - let backend = Arc::new(Backend::new_test(blocks_pruning, 0)); + pub fn with_pruning_window(keep_blocks: u32) -> Self { + let backend = Arc::new(Backend::new_test(keep_blocks, 0)); Self::with_backend(backend) } /// Create new `TestClientBuilder` with default backend and storage chain mode - pub fn with_tx_storage(blocks_pruning: u32) -> Self { - let backend = - Arc::new(Backend::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), 0)); + pub fn with_tx_storage(keep_blocks: u32) -> Self { + let backend = Arc::new(Backend::new_test_with_tx_storage(keep_blocks, 0)); Self::with_backend(backend) } } @@ -347,7 +346,7 @@ impl RpcHandlersExt for RpcHandlers { "params": ["0x{}"], "id": 0 }}"#, - array_bytes::bytes2hex("", &extrinsic.encode()) + hex::encode(extrinsic.encode()) )) 
.await .expect("valid JSON-RPC request object; qed"); diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 698351cd69f64..1c2707b3719ad 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "6.0.0", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.30.0", default-features = false } +memory-db = { version = "0.29.0", default-features = false } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -41,8 +41,8 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. 
sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "6.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.24.0", default-features = false } -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } +trie-db = { version = "0.23.1", default-features = false } +parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } sp-state-machine = { version = "0.12.0", default-features = false, path = "../../primitives/state-machine" } sp-externalities = { version = "0.12.0", default-features = false, path = "../../primitives/externalities" } @@ -67,7 +67,6 @@ default = [ "std", ] std = [ - "parity-util-mem/std", "beefy-primitives/std", "beefy-merkle-tree/std", "sp-application-crypto/std", diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 99d4e1163e272..fe0fef3516671 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -134,7 +134,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); } - let child_roots = storage.children_default.values().map(|child_content| { + let child_roots = storage.children_default.iter().map(|(_sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect(), diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 42706730eb9ac..3ece5165e1757 100644 --- a/test-utils/runtime/src/genesismap.rs +++ 
b/test-utils/runtime/src/genesismap.rs @@ -17,9 +17,8 @@ //! Tool for creating the genesis block. -use super::{system, wasm_binary_unwrap, AccountId, AuthorityId, Runtime}; +use super::{system, wasm_binary_unwrap, AccountId, AuthorityId}; use codec::{Encode, Joiner, KeyedVec}; -use frame_support::traits::GenesisBuild; use sc_service::client::genesis; use sp_core::{ map, @@ -81,11 +80,10 @@ impl GenesisConfig { // Assimilate the system genesis config. let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone() }; - >::assimilate_storage( - &system::GenesisConfig { authorities: self.authorities.clone() }, - &mut storage, - ) - .expect("Adding `system::GensisConfig` to the genesis"); + let config = system::GenesisConfig { authorities: self.authorities.clone() }; + config + .assimilate_storage(&mut storage) + .expect("Adding `system::GensisConfig` to the genesis"); storage } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 8bda4ea602428..e5cfae49da56d 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -29,18 +29,14 @@ use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug}; -use sp_trie::{ - trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, - PrefixedMemoryDB, StorageProof, -}; +use sp_trie::{trie_types::TrieDB, PrefixedMemoryDB, StorageProof}; use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; use frame_support::{ - dispatch::RawOrigin, parameter_types, - traits::{CallerTrait, ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, - weights::{RuntimeDbWeight, Weight}, + traits::{ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, + weights::RuntimeDbWeight, }; use frame_system::limits::{BlockLength, BlockWeights}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; @@ -63,6 +59,8 @@ use sp_runtime::{ #[cfg(any(feature = "std", test))] use 
sp_version::NativeVersion; use sp_version::RuntimeVersion; +// bench on latest state. +use sp_trie::trie_types::TrieDBMutV1 as TrieDBMut; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; @@ -120,7 +118,7 @@ pub fn native_version() -> NativeVersion { } /// Calls in transactions. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct Transfer { pub from: AccountId, pub to: AccountId, @@ -151,7 +149,7 @@ impl Transfer { } /// Extrinsic for test-runtime. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub enum Extrinsic { AuthoritiesChange(Vec), Transfer { @@ -234,14 +232,11 @@ impl ExtrinsicT for Extrinsic { } impl sp_runtime::traits::Dispatchable for Extrinsic { - type RuntimeOrigin = RuntimeOrigin; + type Origin = Origin; type Config = (); type Info = (); type PostInfo = (); - fn dispatch( - self, - _origin: Self::RuntimeOrigin, - ) -> sp_runtime::DispatchResultWithInfo { + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { panic!("This implementation should not be used for actual dispatch."); } } @@ -445,33 +440,22 @@ impl GetRuntimeBlockType for Runtime { } #[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq, TypeInfo, MaxEncodedLen)] -pub struct RuntimeOrigin; +pub struct Origin; -impl From::AccountId>> for RuntimeOrigin { - fn from(_: RawOrigin<::AccountId>) -> Self { +impl From> for Origin { + fn from(_o: frame_system::Origin) -> Self { unimplemented!("Not required in tests!") } } - -impl CallerTrait<::AccountId> for RuntimeOrigin { - fn into_system(self) -> Option::AccountId>> { - unimplemented!("Not required in tests!") - } - - fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { +impl From for Result, Origin> { + fn from(_origin: Origin) -> Result, 
Origin> { unimplemented!("Not required in tests!") } } -impl From for Result, RuntimeOrigin> { - fn from(_origin: RuntimeOrigin) -> Result, RuntimeOrigin> { - unimplemented!("Not required in tests!") - } -} - -impl frame_support::traits::OriginTrait for RuntimeOrigin { - type Call = ::RuntimeCall; - type PalletsOrigin = RuntimeOrigin; +impl frame_support::traits::OriginTrait for Origin { + type Call = ::Call; + type PalletsOrigin = Origin; type AccountId = ::AccountId; fn add_filter(&mut self, _filter: impl Fn(&Self::Call) -> bool + 'static) { @@ -494,10 +478,6 @@ impl frame_support::traits::OriginTrait for RuntimeOrigin { unimplemented!("Not required in tests!") } - fn into_caller(self) -> Self::PalletsOrigin { - unimplemented!("Not required in tests!") - } - fn try_with_caller( self, _f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -517,15 +497,12 @@ impl frame_support::traits::OriginTrait for RuntimeOrigin { fn as_signed(self) -> Option { unimplemented!("Not required in tests!") } - fn as_system_ref(&self) -> Option<&RawOrigin> { - unimplemented!("Not required in tests!") - } } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct RuntimeEvent; +pub struct Event; -impl From> for RuntimeEvent { +impl From> for Event { fn from(_evt: frame_system::Event) -> Self { unimplemented!("Not required in tests!") } @@ -599,21 +576,15 @@ parameter_types! 
{ pub RuntimeBlockLength: BlockLength = BlockLength::max(4 * 1024 * 1024); pub RuntimeBlockWeights: BlockWeights = - BlockWeights::with_sensible_defaults(Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75)); -} - -impl From> for Extrinsic { - fn from(_: frame_system::Call) -> Self { - unimplemented!("Not required in tests!") - } + BlockWeights::with_sensible_defaults(4 * 1024 * 1024, Perbill::from_percent(75)); } impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = Extrinsic; + type Origin = Origin; + type Call = Extrinsic; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -621,7 +592,7 @@ impl frame_system::Config for Runtime { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type RuntimeEvent = RuntimeEvent; + type Event = Event; type BlockHashCount = ConstU64<2400>; type DbWeight = (); type Version = (); @@ -635,8 +606,6 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } -impl system::Config for Runtime {} - impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. 
type Moment = u64; @@ -694,19 +663,25 @@ fn code_using_trie() -> u64 { let mut mdb = PrefixedMemoryDB::default(); let mut root = sp_std::default::Default::default(); - { - let mut t = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); + let _ = { + let mut t = TrieDBMut::::new(&mut mdb, &mut root); for (key, value) in &pairs { if t.insert(key, value).is_err() { return 101 } } - } - - let trie = TrieDBBuilder::::new(&mdb, &root).build(); - let res = if let Ok(iter) = trie.iter() { iter.flatten().count() as u64 } else { 102 }; + t + }; - res + if let Ok(trie) = TrieDB::::new(&mdb, &root) { + if let Ok(iter) = trie.iter() { + iter.flatten().count() as u64 + } else { + 102 + } + } else { + 103 + } } impl_opaque_keys! { @@ -879,12 +854,12 @@ cfg_if! { } impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeConfiguration { - sp_consensus_babe::BabeConfiguration { + fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { + sp_consensus_babe::BabeGenesisConfiguration { slot_duration: 1000, epoch_length: EpochDuration::get(), c: (3, 10), - authorities: system::authorities() + genesis_authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, @@ -1153,12 +1128,12 @@ cfg_if! 
{ } impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeConfiguration { - sp_consensus_babe::BabeConfiguration { + fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { + sp_consensus_babe::BabeGenesisConfiguration { slot_duration: 1000, epoch_length: EpochDuration::get(), c: (3, 10), - authorities: system::authorities() + genesis_authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, @@ -1302,7 +1277,7 @@ fn test_read_child_storage() { fn test_witness(proof: StorageProof, root: crate::Hash) { use sp_externalities::Externalities; let db: sp_trie::MemoryDB = proof.into_memory_db(); - let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default(); let mut ext = sp_state_machine::Ext::new( @@ -1340,7 +1315,7 @@ mod tests { .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .set_heap_pages(8) .build(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger // than the heap. 
@@ -1369,7 +1344,7 @@ mod tests { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); runtime_api.test_storage(&block_id).unwrap(); } @@ -1379,8 +1354,7 @@ mod tests { let mut root = crate::Hash::default(); let mut mdb = sp_trie::MemoryDB::::default(); { - let mut trie = - sp_trie::trie_types::TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); + let mut trie = sp_trie::trie_types::TrieDBMutV1::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); }; @@ -1390,13 +1364,12 @@ mod tests { #[test] fn witness_backend_works() { let (db, root) = witness_backend(); - let backend = - sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Hash(client.chain_info().best_hash); + let block_id = BlockId::Number(client.chain_info().best_number); runtime_api.test_witness(&block_id, proof, root).unwrap(); } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 6e33d5c25fe6f..77cd18c028364 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -19,11 +19,11 @@ //! and depositing logs. 
use crate::{ - AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Runtime, Transfer, - H256 as Hash, + AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; -use frame_support::storage; +use frame_support::{decl_module, decl_storage, storage}; +use frame_system::Config; use sp_core::storage::well_known_keys; use sp_io::{hashing::blake2_256, storage::root as storage_root, trie}; use sp_runtime::{ @@ -39,51 +39,19 @@ use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; -pub use self::pallet::*; - -#[frame_support::pallet] -mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData); - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::storage] - pub type ExtrinsicData = StorageMap<_, Blake2_128Concat, u32, Vec, ValueQuery>; - - // The current block number being processed. Set by `execute_block`. - #[pallet::storage] - pub type Number = StorageValue<_, BlockNumber, OptionQuery>; - - #[pallet::storage] - pub type ParentHash = StorageValue<_, Hash, ValueQuery>; - - #[pallet::storage] - pub type NewAuthorities = StorageValue<_, Vec, OptionQuery>; - - #[pallet::storage] - pub type StorageDigest = StorageValue<_, Digest, OptionQuery>; - - #[pallet::storage] - pub type Authorities = StorageValue<_, Vec, ValueQuery>; - - #[pallet::genesis_config] - #[cfg_attr(feature = "std", derive(Default))] - pub struct GenesisConfig { - pub authorities: Vec, - } +decl_module! { + pub struct Module for enum Call where origin: T::Origin {} +} - #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { - fn build(&self) { - >::put(self.authorities.clone()); - } +decl_storage! 
{ + trait Store for Module as TestRuntime { + ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; + // The current block number being processed. Set by `execute_block`. + Number get(fn number): Option; + ParentHash get(fn parent_hash): Hash; + NewAuthorities get(fn new_authorities): Option>; + StorageDigest get(fn storage_digest): Option; + Authorities get(fn authorities) config(): Vec; } } @@ -101,9 +69,9 @@ pub fn nonce_of(who: AccountId) -> u64 { pub fn initialize_block(header: &Header) { // populate environment. - >::put(&header.number); - >::put(&header.parent_hash); - >::put(header.digest()); + ::put(&header.number); + ::put(&header.parent_hash); + ::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest @@ -114,15 +82,15 @@ pub fn initialize_block(header: &Header) { } pub fn authorities() -> Vec { - >::get() + Authorities::get() } pub fn get_block_number() -> Option { - >::get() + Number::get() } pub fn take_block_number() -> Option { - >::take() + Number::take() } #[derive(Copy, Clone)] @@ -156,8 +124,8 @@ fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) -> Heade header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); - assert_eq!( - new_header.state_root, header.state_root, + assert!( + new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } @@ -166,8 +134,8 @@ fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) -> Heade header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); - assert_eq!( - new_header.extrinsics_root, header.extrinsics_root, + assert!( + new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } @@ -219,7 +187,7 @@ pub fn execute_transaction(utx: Extrinsic) -> 
ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); let result = execute_transaction_backend(&utx, extrinsic_index); - >::insert(extrinsic_index, utx.encode()); + ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } @@ -228,14 +196,13 @@ pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { pub fn finalize_block() -> Header { use sp_core::storage::StateVersion; let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let txs: Vec<_> = (0..extrinsic_index).map(>::take).collect(); + let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs, StateVersion::V0); - let number = >::take().expect("Number is set by `initialize_block`"); - let parent_hash = >::take(); - let mut digest = - >::take().expect("StorageDigest is set by `initialize_block`"); + let number = ::take().expect("Number is set by `initialize_block`"); + let parent_hash = ::take(); + let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); - let o_new_authorities = >::take(); + let o_new_authorities = ::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
@@ -313,7 +280,7 @@ fn execute_store(data: Vec) -> ApplyExtrinsicResult { } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { - >::put(new_authorities.to_vec()); + NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } @@ -355,6 +322,7 @@ mod tests { use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, + NeverNativeValue, }; use sp_io::{hashing::twox_128, TestExternalities}; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; @@ -438,7 +406,14 @@ mod tests { }; executor() - .call(&mut ext, &runtime_code, "Core_execute_block", &b.encode(), false) + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) .0 .unwrap(); }) @@ -540,7 +515,14 @@ mod tests { }; executor() - .call(&mut ext, &runtime_code, "Core_execute_block", &b.encode(), false) + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) .0 .unwrap(); }) diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index fa6dde5b5b57e..98378309ad9c1 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -parking_lot = "0.12.1" +parking_lot = "0.12.0" thiserror = "1.0" sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index f8d551a6fa5bd..4008427623499 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -22,7 +22,7 @@ use codec::Encode; use futures::future::ready; use parking_lot::RwLock; 
-use sp_blockchain::{CachedHeaderMetadata, TreeRoute}; +use sp_blockchain::CachedHeaderMetadata; use sp_runtime::{ generic::{self, BlockId}, traits::{ @@ -315,13 +315,13 @@ impl sc_transaction_pool::ChainApi for TestApi { Self::hash_and_length_inner(ex) } - fn block_body(&self, hash: ::Hash) -> Self::BodyFuture { - futures::future::ready(Ok(self - .chain - .read() - .block_by_hash - .get(&hash) - .map(|b| b.extrinsics().to_vec()))) + fn block_body(&self, id: &BlockId) -> Self::BodyFuture { + futures::future::ready(Ok(match id { + BlockId::Number(num) => + self.chain.read().block_by_number.get(num).map(|b| b[0].0.extrinsics().to_vec()), + BlockId::Hash(hash) => + self.chain.read().block_by_hash.get(hash).map(|b| b.extrinsics().to_vec()), + })) } fn block_header( @@ -335,14 +335,6 @@ impl sc_transaction_pool::ChainApi for TestApi { self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), }) } - - fn tree_route( - &self, - from: ::Hash, - to: ::Hash, - ) -> Result, Self::Error> { - sp_blockchain::tree_route::(self, from, to).map_err(Into::into) - } } impl sp_blockchain::HeaderMetadata for TestApi { diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index a2d548f1fa5cd..31b8f1332a653 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -13,24 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "4.1" chrono = "0.4" -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } comfy-table = { version = "6.0.0", default-features = false } handlebars = "4.2.2" hash-db = "0.15.2" +hex = "0.4.3" Inflector = "0.11.4" itertools = "0.10.3" -kvdb = "0.12.0" +kvdb = "0.11.0" lazy_static = "1.4.0" linked-hash-map = "0.5.4" log = "0.4.17" -memory-db = "0.30.0" +memory-db = "0.29.0" rand = { version = "0.8.4", features = 
["small_rng"] } rand_pcg = "0.3.1" serde = "1.0.136" -serde_json = "1.0.85" +serde_json = "1.0.79" serde_nanos = "0.1.2" tempfile = "3.2.0" thiserror = "1.0.30" @@ -54,12 +54,11 @@ sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } -sp-std = { version = "4.0.0", path = "../../../primitives/std" } sp-storage = { version = "6.0.0", path = "../../../primitives/storage" } sp-trie = { version = "6.0.0", path = "../../../primitives/trie" } gethostname = "0.2.3" [features] -default = ["rocksdb"] +default = ["rocksdb", "runtime-benchmarks"] runtime-benchmarks = ["sc-client-db/runtime-benchmarks"] rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"] diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 5a67b11f494f5..e48a7e8b3c6f5 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -22,9 +22,7 @@ use frame_support::weights::constants::WEIGHT_PER_NANOS; use frame_system::ConsumedWeight; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{Error, Result}; -use sc_client_api::{ - Backend as ClientBackend, BlockBackend, HeaderBackend, StorageProvider, UsageProvider, -}; +use sc_client_api::{Backend as ClientBackend, BlockBackend, StorageProvider, UsageProvider}; use sp_api::{ApiExt, Core, HeaderT, ProvideRuntimeApi}; use sp_blockchain::Error::RuntimeApiError; use sp_runtime::{generic::BlockId, traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; @@ -45,15 +43,15 @@ const LOG_TARGET: &'static str = "benchmark::block::weight"; #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] pub struct BenchmarkParams { /// Number of the first block to consider. 
- #[arg(long)] + #[clap(long)] pub from: u32, /// Last block number to consider. - #[arg(long)] + #[clap(long)] pub to: u32, /// Number of times that the benchmark should be repeated for each block. - #[arg(long, default_value_t = 10)] + #[clap(long, default_value = "10")] pub repeat: u32, } @@ -75,8 +73,7 @@ where + ProvideRuntimeApi + StorageProvider + UsageProvider - + BlockBackend - + HeaderBackend, + + BlockBackend, C::Api: ApiExt + BlockBuilderApi, { /// Returns a new [`Self`] from the arguments. @@ -134,22 +131,18 @@ where fn consumed_weight(&self, block: &BlockId) -> Result { // Hard-coded key for System::BlockWeight. It could also be passed in as argument // for the benchmark, but I think this should work as well. - let hash = array_bytes::hex2bytes( - "26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96", - )?; + let hash = hex::decode("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96")?; let key = StorageKey(hash); - let block_hash = self.client.expect_block_hash_from_id(block)?; let mut raw_weight = &self .client - .storage(block_hash, &key)? + .storage(&block, &key)? .ok_or(format!("Could not find System::BlockWeight for block: {}", block))? .0[..]; let weight = ConsumedWeight::decode_all(&mut raw_weight)?; // Should be divisible, but still use floats in case we ever change that. - Ok((weight.total().ref_time() as f64 / WEIGHT_PER_NANOS.ref_time() as f64).floor() - as NanoSeconds) + Ok((weight.total() as f64 / WEIGHT_PER_NANOS as f64).floor() as NanoSeconds) } /// Prints the weight info of a block to the console. 
diff --git a/utils/frame/benchmarking-cli/src/block/cmd.rs b/utils/frame/benchmarking-cli/src/block/cmd.rs index 8bac04110f7ab..e4e1716b1c5ac 100644 --- a/utils/frame/benchmarking-cli/src/block/cmd.rs +++ b/utils/frame/benchmarking-cli/src/block/cmd.rs @@ -22,7 +22,6 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::{Backend as ClientBackend, BlockBackend, StorageProvider, UsageProvider}; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_blockchain::HeaderBackend; use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; use clap::Parser; @@ -68,12 +67,6 @@ pub struct BlockCmd { #[allow(missing_docs)] #[clap(flatten)] pub params: BenchmarkParams, - - /// Enable the Trie cache. - /// - /// This should only be used for performance analysis and not for final results. - #[arg(long)] - pub enable_trie_cache: bool, } impl BlockCmd { @@ -88,8 +81,7 @@ impl BlockCmd { + BlockBackend + ProvideRuntimeApi + StorageProvider - + UsageProvider - + HeaderBackend, + + UsageProvider, C::Api: ApiExt + BlockBuilderApi, { // Put everything in the benchmark type to have the generic types handy. 
@@ -106,12 +98,4 @@ impl CliConfiguration for BlockCmd { fn import_params(&self) -> Option<&ImportParams> { Some(&self.import_params) } - - fn trie_cache_maximum_size(&self) -> Result> { - if self.enable_trie_cache { - Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) - } else { - Ok(None) - } - } } diff --git a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 2a86c10e7ad27..27fc40fb34774 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -28,7 +28,7 @@ use sp_blockchain::{ use sp_runtime::{ traits::{Block as BlockT, Zero}, transaction_validity::{InvalidTransaction, TransactionValidityError}, - Digest, DigestItem, OpaqueExtrinsic, + OpaqueExtrinsic, }; use clap::Args; @@ -43,17 +43,17 @@ use crate::shared::{StatSelect, Stats}; #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] pub struct BenchmarkParams { /// Rounds of warmups before measuring. - #[arg(long, default_value_t = 10)] + #[clap(long, default_value = "10")] pub warmup: u32, /// How many times the benchmark should be repeated. - #[arg(long, default_value_t = 100)] + #[clap(long, default_value = "100")] pub repeat: u32, /// Maximal number of extrinsics that should be put into a block. /// /// Only useful for debugging. - #[arg(long)] + #[clap(long)] pub max_ext_per_block: Option, } @@ -65,7 +65,6 @@ pub(crate) struct Benchmark { client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, - digest_items: Vec, _p: PhantomData<(Block, BA)>, } @@ -81,9 +80,8 @@ where client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, - digest_items: Vec, ) -> Self { - Self { client, params, inherent_data, digest_items, _p: PhantomData } + Self { client, params, inherent_data, _p: PhantomData } } /// Benchmark a block with only inherents. 
@@ -127,7 +125,7 @@ where &self, ext_builder: Option<&dyn ExtrinsicBuilder>, ) -> Result<(Block, Option)> { - let mut builder = self.client.new_block(Digest { logs: self.digest_items.clone() })?; + let mut builder = self.client.new_block(Default::default())?; // Create and insert the inherents. let inherents = builder.create_inherents(self.inherent_data.clone())?; for inherent in inherents { diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index b95cd6b5c2e42..6b4fd0fad6638 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -19,7 +19,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::Backend as ClientBackend; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; +use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; use clap::{Args, Parser}; use log::info; @@ -62,22 +62,16 @@ pub struct ExtrinsicParams { /// List all available pallets and extrinsics. /// /// The format is CSV with header `pallet, extrinsic`. - #[arg(long)] + #[clap(long)] pub list: bool, /// Pallet name of the extrinsic to benchmark. - #[arg(long, value_name = "PALLET", required_unless_present = "list")] + #[clap(long, value_name = "PALLET", required_unless_present = "list")] pub pallet: Option, /// Extrinsic to benchmark. - #[arg(long, value_name = "EXTRINSIC", required_unless_present = "list")] + #[clap(long, value_name = "EXTRINSIC", required_unless_present = "list")] pub extrinsic: Option, - - /// Enable the Trie cache. - /// - /// This should only be used for performance analysis and not for final results. 
- #[arg(long)] - pub enable_trie_cache: bool, } impl ExtrinsicCmd { @@ -88,7 +82,6 @@ impl ExtrinsicCmd { &self, client: Arc, inherent_data: sp_inherents::InherentData, - digest_items: Vec, ext_factory: &ExtrinsicFactory, ) -> Result<()> where @@ -116,7 +109,7 @@ impl ExtrinsicCmd { return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into()), }; - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); + let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data); let stats = bench.bench_extrinsic(ext_builder)?; info!( "Executing a {}::{} extrinsic takes[ns]:\n{:?}", @@ -138,12 +131,4 @@ impl CliConfiguration for ExtrinsicCmd { fn import_params(&self) -> Option<&ImportParams> { Some(&self.import_params) } - - fn trie_cache_maximum_size(&self) -> Result> { - if self.params.enable_trie_cache { - Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) - } else { - Ok(None) - } - } } diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index a44a208b16ae9..96c7a997895d4 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -30,7 +30,6 @@ pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory}; pub use machine::{MachineCmd, Requirements, SUBSTRATE_REFERENCE_HARDWARE}; pub use overhead::OverheadCmd; pub use pallet::PalletCmd; -pub use sc_service::BasePath; pub use storage::StorageCmd; use sc_cli::{CliConfiguration, DatabaseParams, ImportParams, PruningParams, Result, SharedParams}; @@ -88,28 +87,15 @@ impl CliConfiguration for BenchmarkCmd { } } - fn base_path(&self) -> Result> { - let inner = unwrap_cmd! { - self, cmd, cmd.base_path() - }; - - // If the base path was not provided, benchmark command shall use temporary path. Otherwise - // we may end up using shared path, which may be inappropriate for benchmarking. 
- match inner { - Ok(None) => Some(BasePath::new_temp_dir()).transpose().map_err(|e| e.into()), - e => e, - } - } - fn pruning_params(&self) -> Option<&PruningParams> { unwrap_cmd! { self, cmd, cmd.pruning_params() } } - fn trie_cache_maximum_size(&self) -> Result> { + fn state_cache_size(&self) -> Result { unwrap_cmd! { - self, cmd, cmd.trie_cache_maximum_size() + self, cmd, cmd.state_cache_size() } } diff --git a/utils/frame/benchmarking-cli/src/machine/hardware.rs b/utils/frame/benchmarking-cli/src/machine/hardware.rs index 50c88ec74646c..5c62660cc7cf4 100644 --- a/utils/frame/benchmarking-cli/src/machine/hardware.rs +++ b/utils/frame/benchmarking-cli/src/machine/hardware.rs @@ -18,40 +18,8 @@ //! Contains types to define hardware requirements. use lazy_static::lazy_static; -use sc_sysinfo::Throughput; -use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; -use sp_std::{fmt, fmt::Formatter}; - -/// Serializes throughput into MiBs and represents it as `f64`. -fn serialize_throughput_as_f64(throughput: &Throughput, serializer: S) -> Result -where - S: Serializer, -{ - serializer.serialize_f64(throughput.as_mibs()) -} - -struct ThroughputVisitor; -impl<'de> Visitor<'de> for ThroughputVisitor { - type Value = Throughput; - - fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { - formatter.write_str("A value that is a f64.") - } - - fn visit_f64(self, value: f64) -> Result - where - E: serde::de::Error, - { - Ok(Throughput::from_mibs(value)) - } -} - -fn deserialize_throughput<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - Ok(deserializer.deserialize_f64(ThroughputVisitor))? -} +use serde::{Deserialize, Serialize}; +use std::fmt; lazy_static! { /// The hardware requirements as measured on reference hardware. @@ -77,10 +45,6 @@ pub struct Requirement { /// The metric to measure. pub metric: Metric, /// The minimal throughput that needs to be archived for this requirement. 
- #[serde( - serialize_with = "serialize_throughput_as_f64", - deserialize_with = "deserialize_throughput" - )] pub minimum: Throughput, } @@ -101,6 +65,17 @@ pub enum Metric { DiskRndWrite, } +/// Throughput as measured in bytes per second. +#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)] +pub enum Throughput { + /// KiB/s + KiBs(f64), + /// MiB/s + MiBs(f64), + /// GiB/s + GiBs(f64), +} + impl Metric { /// The category of the metric. pub fn category(&self) -> &'static str { @@ -123,9 +98,70 @@ impl Metric { } } +const KIBIBYTE: f64 = 1024.0; + +impl Throughput { + /// The unit of the metric. + pub fn unit(&self) -> &'static str { + match self { + Self::KiBs(_) => "KiB/s", + Self::MiBs(_) => "MiB/s", + Self::GiBs(_) => "GiB/s", + } + } + + /// [`Self`] as number of byte/s. + pub fn to_bs(&self) -> f64 { + self.to_kibs() * KIBIBYTE + } + + /// [`Self`] as number of kibibyte/s. + pub fn to_kibs(&self) -> f64 { + self.to_mibs() * KIBIBYTE + } + + /// [`Self`] as number of mebibyte/s. + pub fn to_mibs(&self) -> f64 { + self.to_gibs() * KIBIBYTE + } + + /// [`Self`] as number of gibibyte/s. + pub fn to_gibs(&self) -> f64 { + match self { + Self::KiBs(k) => *k / (KIBIBYTE * KIBIBYTE), + Self::MiBs(m) => *m / KIBIBYTE, + Self::GiBs(g) => *g, + } + } + + /// Normalizes [`Self`] to use the larges unit possible. 
+ pub fn normalize(&self) -> Self { + let bs = self.to_bs(); + + if bs >= KIBIBYTE * KIBIBYTE * KIBIBYTE { + Self::GiBs(self.to_gibs()) + } else if bs >= KIBIBYTE * KIBIBYTE { + Self::MiBs(self.to_mibs()) + } else { + Self::KiBs(self.to_kibs()) + } + } +} + +impl fmt::Display for Throughput { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let normalized = self.normalize(); + match normalized { + Self::KiBs(s) | Self::MiBs(s) | Self::GiBs(s) => + write!(f, "{:.2?} {}", s, normalized.unit()), + } + } +} + #[cfg(test)] mod tests { use super::*; + use sp_runtime::assert_eq_error_rate; /// `SUBSTRATE_REFERENCE_HARDWARE` can be en- and decoded. #[test] @@ -135,4 +171,21 @@ mod tests { assert_eq!(decoded, SUBSTRATE_REFERENCE_HARDWARE.clone()); } + + /// Test the [`Throughput`]. + #[test] + fn throughput_works() { + /// Float precision. + const EPS: f64 = 0.1; + let gib = Throughput::GiBs(14.324); + + assert_eq_error_rate!(14.324, gib.to_gibs(), EPS); + assert_eq_error_rate!(14667.776, gib.to_mibs(), EPS); + assert_eq_error_rate!(14667.776 * 1024.0, gib.to_kibs(), EPS); + assert_eq!("14.32 GiB/s", gib.to_string()); + assert_eq!("14.32 GiB/s", gib.normalize().to_string()); + + let mib = Throughput::MiBs(1029.0); + assert_eq!("1.00 GiB/s", mib.to_string()); + } } diff --git a/utils/frame/benchmarking-cli/src/machine/mod.rs b/utils/frame/benchmarking-cli/src/machine/mod.rs index 82b4e5be7358e..5f27c71983905 100644 --- a/utils/frame/benchmarking-cli/src/machine/mod.rs +++ b/utils/frame/benchmarking-cli/src/machine/mod.rs @@ -30,11 +30,11 @@ use sc_cli::{CliConfiguration, Result, SharedParams}; use sc_service::Configuration; use sc_sysinfo::{ benchmark_cpu, benchmark_disk_random_writes, benchmark_disk_sequential_writes, - benchmark_memory, benchmark_sr25519_verify, ExecutionLimit, Throughput, + benchmark_memory, benchmark_sr25519_verify, ExecutionLimit, }; use crate::shared::check_build_profile; -pub use hardware::{Metric, Requirement, Requirements, 
SUBSTRATE_REFERENCE_HARDWARE}; +pub use hardware::{Metric, Requirement, Requirements, Throughput, SUBSTRATE_REFERENCE_HARDWARE}; /// Command to benchmark the hardware. /// @@ -53,30 +53,30 @@ pub struct MachineCmd { /// Do not return an error if any check fails. /// /// Should only be used for debugging. - #[arg(long)] + #[clap(long)] pub allow_fail: bool, /// Set a fault tolerance for passing a requirement. /// /// 10% means that the test would pass even when only 90% score was archived. /// Can be used to mitigate outliers of the benchmarks. - #[arg(long, default_value_t = 10.0, value_name = "PERCENT")] + #[clap(long, default_value = "10.0", value_name = "PERCENT")] pub tolerance: f64, /// Time limit for the verification benchmark. - #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] + #[clap(long, default_value = "5.0", value_name = "SECONDS")] pub verify_duration: f32, /// Time limit for the hash function benchmark. - #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] + #[clap(long, default_value = "5.0", value_name = "SECONDS")] pub hash_duration: f32, /// Time limit for the memory benchmark. - #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] + #[clap(long, default_value = "5.0", value_name = "SECONDS")] pub memory_duration: f32, /// Time limit for each disk benchmark. - #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] + #[clap(long, default_value = "5.0", value_name = "SECONDS")] pub disk_duration: f32, } @@ -128,9 +128,8 @@ impl MachineCmd { /// Benchmarks a specific metric of the hardware and judges the resulting score. fn run_benchmark(&self, requirement: &Requirement, dir: &Path) -> Result { // Dispatch the concrete function from `sc-sysinfo`. - let score = self.measure(&requirement.metric, dir)?; - let rel_score = score.as_bytes() / requirement.minimum.as_bytes(); + let rel_score = score.to_bs() / requirement.minimum.to_bs(); // Sanity check if the result is off by factor >100x. 
if rel_score >= 100.0 || rel_score <= 0.01 { @@ -148,11 +147,13 @@ impl MachineCmd { let memory_limit = ExecutionLimit::from_secs_f32(self.memory_duration); let score = match metric { - Metric::Blake2256 => benchmark_cpu(hash_limit), - Metric::Sr25519Verify => benchmark_sr25519_verify(verify_limit), - Metric::MemCopy => benchmark_memory(memory_limit), - Metric::DiskSeqWrite => benchmark_disk_sequential_writes(disk_limit, dir)?, - Metric::DiskRndWrite => benchmark_disk_random_writes(disk_limit, dir)?, + Metric::Blake2256 => Throughput::MiBs(benchmark_cpu(hash_limit) as f64), + Metric::Sr25519Verify => Throughput::MiBs(benchmark_sr25519_verify(verify_limit)), + Metric::MemCopy => Throughput::MiBs(benchmark_memory(memory_limit) as f64), + Metric::DiskSeqWrite => + Throughput::MiBs(benchmark_disk_sequential_writes(disk_limit, dir)? as f64), + Metric::DiskRndWrite => + Throughput::MiBs(benchmark_disk_random_writes(disk_limit, dir)? as f64), }; Ok(score) } diff --git a/utils/frame/benchmarking-cli/src/machine/reference_hardware.json b/utils/frame/benchmarking-cli/src/machine/reference_hardware.json index 2a451d31403f1..12645df8391e7 100644 --- a/utils/frame/benchmarking-cli/src/machine/reference_hardware.json +++ b/utils/frame/benchmarking-cli/src/machine/reference_hardware.json @@ -1,22 +1,32 @@ [ { "metric": "Blake2256", - "minimum": 1029.0 + "minimum": { + "MiBs": 1029.0 + } }, { "metric": "Sr25519Verify", - "minimum": 0.650391 + "minimum": { + "KiBs": 666.0 + } }, { "metric": "MemCopy", - "minimum": 14666.752 + "minimum": { + "GiBs": 14.323 + } }, { "metric": "DiskSeqWrite", - "minimum": 450.0 + "minimum": { + "MiBs": 450.0 + } }, { "metric": "DiskRndWrite", - "minimum": 200.0 + "minimum": { + "MiBs": 200.0 + } } ] diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md index b21d051e9d44c..6f41e881d0572 100644 --- a/utils/frame/benchmarking-cli/src/overhead/README.md +++ 
b/utils/frame/benchmarking-cli/src/overhead/README.md @@ -1,21 +1,21 @@ # The `benchmark overhead` command -Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead". -This is necessary since the weight that is calculated by the pallet benchmarks does not include this overhead. -The exact overhead to can vary per Substrate chain and needs to be calculated per chain. +Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead". +This is necessary since the weight that is calculated by the pallet benchmarks does not include this overhead. +The exact overhead to can vary per Substrate chain and needs to be calculated per chain. This command calculates the exact values of these overhead weights for any Substrate chain that supports it. ## How does it work? -The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`]. +The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`]. Both are executed sequentially when invoking the command. ## BlockExecutionWeight -The block execution weight is defined as the weight that it takes to execute an *empty block*. -It is measured by constructing an empty block and measuring its executing time. -The result are written to a `block_weights.rs` file which is created from a template. -The file will contain the concrete weight value and various statistics about the measurements. For example: +The block execution weight is defined as the weight that it takes to execute an *empty block*. +It is measured by constructing an empty block and measuring its executing time. +The result are written to a `block_weights.rs` file which is created from a template. +The file will contain the concrete weight value and various statistics about the measurements. For example: ```rust /// Time to execute an empty block. /// Calculated by multiplying the *Average* with `1` and adding `0`. 
@@ -30,21 +30,21 @@ The file will contain the concrete weight value and various statistics about the /// 99th: 3_631_863 /// 95th: 3_595_674 /// 75th: 3_526_435 -pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(3_532_484); +pub const BlockExecutionWeight: Weight = 3_532_484 * WEIGHT_PER_NANOS; ``` -In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block. +In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block. This constant weight is therefore added to each block to ensure that Substrate budgets enough time to execute it. ## ExtrinsicBaseWeight -The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic. -An *empty* extrinsic is also called a *NO-OP*. It does nothing and is the equivalent to the empty block form above. +The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic. +An *empty* extrinsic is also called a *NO-OP*. It does nothing and is the equivalent to the empty block form above. The benchmark now constructs a block which is filled with only NO-OP extrinsics. -This block is then executed many times and the weights are measured. -The result is divided by the number of extrinsics in that block and the results are written to `extrinsic_weights.rs`. +This block is then executed many times and the weights are measured. +The result is divided by the number of extrinsics in that block and the results are written to `extrinsic_weights.rs`. -The relevant section in the output file looks like this: +The relevant section in the output file looks like this: ```rust /// Time to execute a NO-OP extrinsic, for example `System::remark`. /// Calculated by multiplying the *Average* with `1` and adding `0`. 
@@ -59,10 +59,10 @@ The relevant section in the output file looks like this: /// 99th: 68_758 /// 95th: 67_843 /// 75th: 67_749 -pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(67_745); +pub const ExtrinsicBaseWeight: Weight = 67_745 * WEIGHT_PER_NANOS; ``` -In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic. +In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic. This constant weight is therefore added to each extrinsic to ensure that Substrate budgets enough time to execute it. ## Invocation @@ -76,48 +76,48 @@ Output: ```pre # BlockExecutionWeight Running 10 warmups... -Executing block 100 times +Executing block 100 times Per-block execution overhead [ns]: Total: 353248430 Min: 3508416, Max: 3680498 Average: 3532484, Median: 3522111, Stddev: 27070.23 -Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435 +Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435 Writing weights to "block_weights.rs" # Setup -Building block, this takes some time... +Building block, this takes some time... Extrinsics per block: 12000 # ExtrinsicBaseWeight Running 10 warmups... 
-Executing block 100 times +Executing block 100 times Per-extrinsic execution overhead [ns]: Total: 6774590 Min: 67561, Max: 69855 Average: 67745, Median: 67701, Stddev: 264.68 -Percentiles 99th, 95th, 75th: 68758, 67843, 67749 +Percentiles 99th, 95th, 75th: 68758, 67843, 67749 Writing weights to "extrinsic_weights.rs" ``` -The complete command for Polkadot looks like this: +The complete command for Polkadot looks like this: ```sh cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --execution=wasm --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/ ``` -This will overwrite the the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory. -You can try the same for *Rococo* and to see that the results slightly differ. +This will overwrite the the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory. +You can try the same for *Rococo* and to see that the results slightly differ. 👉 It is paramount to use `--profile=production`, `--execution=wasm` and `--wasm-execution=compiled` as the results are otherwise useless. ## Output Interpretation -Lower is better. The less weight the execution overhead needs, the better. -Since the weights of the overhead is charged per extrinsic and per block, a larger weight results in less extrinsics per block. +Lower is better. 
The less weight the execution overhead needs, the better. +Since the weights of the overhead is charged per extrinsic and per block, a larger weight results in less extrinsics per block. Minimizing this is important to have a large transaction throughput. ## Arguments -- `--chain` / `--dev` Set the chain specification. -- `--weight-path` Set the output directory or file to write the weights to. +- `--chain` / `--dev` Set the chain specification. +- `--weight-path` Set the output directory or file to write the weights to. - `--repeat` Set the repetitions of both benchmarks. - `--warmup` Set the rounds of warmup before measuring. - `--execution` Should be set to `wasm` for correct results. @@ -126,7 +126,6 @@ Minimizing this is important to have a large transaction throughput. - [`--add`](../shared/README.md#arguments) - [`--metric`](../shared/README.md#arguments) - [`--weight-path`](../shared/README.md#arguments) -- [`--header`](../shared/README.md#arguments) License: Apache-2.0 diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 74c1e7efc24d4..28ceea63f1572 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -23,12 +23,12 @@ use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::Backend as ClientBackend; use sc_service::Configuration; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; +use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; use clap::{Args, Parser}; use log::info; use serde::Serialize; -use std::{fmt::Debug, path::PathBuf, sync::Arc}; +use std::{fmt::Debug, sync::Arc}; use crate::{ extrinsic::{ @@ -69,18 +69,6 @@ pub struct OverheadParams { #[allow(missing_docs)] #[clap(flatten)] pub hostinfo: HostInfoParams, - - /// Add a header to the generated weight output file. - /// - /// Good for adding LICENSE headers. 
- #[arg(long, value_name = "PATH")] - pub header: Option, - - /// Enable the Trie cache. - /// - /// This should only be used for performance analysis and not for final results. - #[arg(long)] - pub enable_trie_cache: bool, } /// Type of a benchmark. @@ -102,7 +90,6 @@ impl OverheadCmd { cfg: Configuration, client: Arc, inherent_data: sp_inherents::InherentData, - digest_items: Vec, ext_builder: &dyn ExtrinsicBuilder, ) -> Result<()> where @@ -114,7 +101,7 @@ impl OverheadCmd { if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" { return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into()); } - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); + let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data); // per-block execution overhead { @@ -162,12 +149,4 @@ impl CliConfiguration for OverheadCmd { fn import_params(&self) -> Option<&ImportParams> { Some(&self.import_params) } - - fn trie_cache_maximum_size(&self) -> Result> { - if self.params.enable_trie_cache { - Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) - } else { - Ok(None) - } - } } diff --git a/utils/frame/benchmarking-cli/src/overhead/template.rs b/utils/frame/benchmarking-cli/src/overhead/template.rs index ceed34d1981f9..aa82e45cf6db9 100644 --- a/utils/frame/benchmarking-cli/src/overhead/template.rs +++ b/utils/frame/benchmarking-cli/src/overhead/template.rs @@ -51,8 +51,6 @@ pub(crate) struct TemplateData { hostname: String, /// CPU name of the machine that executed the benchmarks. cpuname: String, - /// Header for the generated file. - header: String, /// Command line arguments that were passed to the CLI. args: Vec, /// Params of the executed command. 
@@ -72,12 +70,6 @@ impl TemplateData { stats: &Stats, ) -> Result { let weight = params.weight.calc_weight(stats)?; - let header = params - .header - .as_ref() - .map(|p| std::fs::read_to_string(p)) - .transpose()? - .unwrap_or_default(); Ok(TemplateData { short_name: t.short_name().into(), @@ -87,7 +79,6 @@ impl TemplateData { date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(), hostname: params.hostinfo.hostname(), cpuname: params.hostinfo.cpuname(), - header, args: env::args().collect::>(), params: params.clone(), stats: stats.clone(), diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index 8d1a369372721..f8312b0052592 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -1,4 +1,20 @@ -{{header}} +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}} //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` @@ -13,8 +29,10 @@ // {{arg}} {{/each}} -use sp_core::parameter_types; -use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; +use frame_support::{ + parameter_types, + weights::{constants::WEIGHT_PER_NANOS, Weight}, +}; parameter_types! 
{ {{#if (eq short_name "block")}} @@ -34,12 +52,12 @@ parameter_types! { /// 99th: {{underscore stats.p99}} /// 95th: {{underscore stats.p95}} /// 75th: {{underscore stats.p75}} - pub const {{long_name}}Weight: Weight = WEIGHT_PER_NANOS.saturating_mul({{underscore weight}}); + pub const {{long_name}}Weight: Weight = {{underscore weight}} * WEIGHT_PER_NANOS; } #[cfg(test)] mod test_weights { - use sp_weights::constants; + use frame_support::weights::constants; /// Checks that the weight exists and is sane. // NOTE: If this test fails but you are sure that the generated values are fine, @@ -50,26 +68,14 @@ mod test_weights { {{#if (eq short_name "block")}} // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), - "Weight should be at least 100 µs." - ); + assert!(w >= 100 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs."); // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), - "Weight should be at most 50 ms." - ); + assert!(w <= 50 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms."); {{else}} // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), - "Weight should be at least 10 µs." - ); + assert!(w >= 10 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs."); // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), - "Weight should be at most 1 ms." 
- ); + assert!(w <= constants::WEIGHT_PER_MILLIS, "Weight should be at most 1 ms."); {{/if}} } } diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 242f0e685290f..fb6f1393650ad 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -40,9 +40,6 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_state_machine::StateMachine; use std::{collections::HashMap, fmt::Debug, fs, sync::Arc, time}; -/// Logging target -const LOG_TARGET: &'static str = "frame::benchmark::pallet"; - /// The inclusive range of a component. #[derive(Serialize, Debug, Clone, Eq, PartialEq)] pub(crate) struct ComponentRange { @@ -137,20 +134,6 @@ impl PalletCmd { }; } - if let Some(json_input) = &self.json_input { - let raw_data = match std::fs::read(json_input) { - Ok(raw_data) => raw_data, - Err(error) => - return Err(format!("Failed to read {:?}: {}", json_input, error).into()), - }; - let batches: Vec = match serde_json::from_slice(&raw_data) { - Ok(batches) => batches, - Err(error) => - return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()), - }; - return self.output_from_results(&batches) - } - let spec = config.chain_spec; let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); let pallet = self.pallet.clone().unwrap_or_default(); @@ -245,12 +228,6 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec, Vec), Vec>::new(); for (pallet, extrinsic, components) in benchmarks_to_run { - log::info!( - target: LOG_TARGET, - "Starting benchmark: {}::{}", - String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), - ); let all_components = if components.is_empty() { vec![Default::default()] } else { @@ -272,8 +249,9 @@ impl PalletCmd { for s in 0..self.steps { // This is the value we will be testing for component 
`name` - let component_value = - ((lowest as f32 + step_size * s as f32) as u32).clamp(lowest, highest); + let component_value = ((lowest as f32 + step_size * s as f32) as u32) + .min(highest) + .max(lowest); // Select the max value for all the other components. let c: Vec<(BenchmarkParameter, u32)> = components @@ -300,15 +278,16 @@ impl PalletCmd { for (s, selected_components) in all_components.iter().enumerate() { // First we run a verification if !self.no_verify { + // Dont use these results since verification code will add overhead let state = &state_without_tracking; - let result = StateMachine::new( + let _results = StateMachine::new( state, &mut changes, &executor, "Benchmark_dispatch_benchmark", &( - &pallet, - &extrinsic, + &pallet.clone(), + &extrinsic.clone(), &selected_components.clone(), true, // run verification code 1, // no need to do internal repeats @@ -323,20 +302,6 @@ impl PalletCmd { .map_err(|e| { format!("Error executing and verifying runtime benchmark: {}", e) })?; - // Dont use these results since verification code will add overhead. - let _batch = - , String> as Decode>::decode( - &mut &result[..], - ) - .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))? - .map_err(|e| { - format!( - "Benchmark {}::{} failed: {}", - String::from_utf8_lossy(&pallet), - String::from_utf8_lossy(&extrinsic), - e - ) - })?; } // Do one loop of DB tracking. { @@ -406,9 +371,7 @@ impl PalletCmd { if let Ok(elapsed) = timer.elapsed() { if elapsed >= time::Duration::from_secs(5) { timer = time::SystemTime::now(); - log::info!( - target: LOG_TARGET, "Running Benchmark: {}.{}({} args) {}/{} {}/{}", String::from_utf8(pallet.clone()) .expect("Encoded from String; qed"), @@ -428,69 +391,25 @@ impl PalletCmd { // Combine all of the benchmark results, so that benchmarks of the same pallet/function // are together. 
- let batches = combine_batches(batches, batches_db); - self.output(&batches, &storage_info, &component_ranges) - } - - fn output( - &self, - batches: &[BenchmarkBatchSplitResults], - storage_info: &[StorageInfo], - component_ranges: &HashMap<(Vec, Vec), Vec>, - ) -> Result<()> { - // Jsonify the result and write it to a file or stdout if desired. - if !self.jsonify(&batches)? { - // Print the summary only if `jsonify` did not write to stdout. - self.print_summary(&batches, &storage_info) - } + let batches: Vec = combine_batches(batches, batches_db); // Create the weights.rs file. if let Some(output_path) = &self.output { writer::write_results(&batches, &storage_info, &component_ranges, output_path, self)?; } - Ok(()) - } - - fn output_from_results(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<()> { - let mut component_ranges = - HashMap::<(Vec, Vec), HashMap>::new(); - for batch in batches { - let range = component_ranges - .entry((batch.pallet.clone(), batch.benchmark.clone())) - .or_default(); - for result in &batch.time_results { - for (param, value) in &result.components { - let name = param.to_string(); - let (ref mut min, ref mut max) = range.entry(name).or_insert((*value, *value)); - if *value < *min { - *min = *value; - } - if *value > *max { - *max = *value; - } - } - } + // Jsonify the result and write it to a file or stdout if desired. + if !self.jsonify(&batches)? { + // Print the summary only if `jsonify` did not write to stdout. + self.print_summary(&batches, &storage_info) } - - let component_ranges: HashMap<_, _> = component_ranges - .into_iter() - .map(|(key, ranges)| { - let ranges = ranges - .into_iter() - .map(|(name, (min, max))| ComponentRange { name, min, max }) - .collect(); - (key, ranges) - }) - .collect(); - - self.output(batches, &[], &component_ranges) + Ok(()) } /// Jsonifies the passed batches and writes them to stdout or into a file. /// Can be configured via `--json` and `--json-file`. 
/// Returns whether it wrote to stdout. - fn jsonify(&self, batches: &[BenchmarkBatchSplitResults]) -> Result { + fn jsonify(&self, batches: &Vec) -> Result { if self.json_output || self.json_file.is_some() { let json = serde_json::to_string_pretty(&batches) .map_err(|e| format!("Serializing into JSON: {:?}", e))?; @@ -498,7 +417,7 @@ impl PalletCmd { if let Some(path) = &self.json_file { fs::write(path, json)?; } else { - print!("{json}"); + println!("{}", json); return Ok(true) } } @@ -507,7 +426,11 @@ impl PalletCmd { } /// Prints the results as human-readable summary without raw timing data. - fn print_summary(&self, batches: &[BenchmarkBatchSplitResults], storage_info: &[StorageInfo]) { + fn print_summary( + &self, + batches: &Vec, + storage_info: &Vec, + ) { for batch in batches.iter() { // Print benchmark metadata println!( diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index b10f531bc0aed..7beaf321a2927 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -27,69 +27,69 @@ use std::{fmt::Debug, path::PathBuf}; // Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be // used like crate names with `_` -fn parse_pallet_name(pallet: &str) -> std::result::Result { - Ok(pallet.replace("-", "_")) +fn parse_pallet_name(pallet: &str) -> String { + pallet.replace("-", "_") } /// Benchmark the extrinsic weight of FRAME Pallets. #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input"])] + #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present = "list")] pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. 
- #[arg(short, long, required_unless_present_any = ["list", "json_input"])] + #[clap(short, long, required_unless_present = "list")] pub extrinsic: Option, /// Select how many samples we should take across the variable components. - #[arg(short, long, default_value_t = 2)] + #[clap(short, long, default_value = "1")] pub steps: u32, /// Indicates lowest values for each of the component ranges. - #[arg(long = "low", value_delimiter = ',')] + #[clap(long = "low", use_value_delimiter = true)] pub lowest_range_values: Vec, /// Indicates highest values for each of the component ranges. - #[arg(long = "high", value_delimiter = ',')] + #[clap(long = "high", use_value_delimiter = true)] pub highest_range_values: Vec, /// Select how many repetitions of this benchmark should run from within the wasm. - #[arg(short, long, default_value_t = 1)] + #[clap(short, long, default_value = "1")] pub repeat: u32, /// Select how many repetitions of this benchmark should run from the client. /// /// NOTE: Using this alone may give slower results, but will afford you maximum Wasm memory. - #[arg(long, default_value_t = 1)] + #[clap(long, default_value = "1")] pub external_repeat: u32, /// Print the raw results in JSON format. - #[arg(long = "json")] + #[clap(long = "json")] pub json_output: bool, /// Write the raw results in JSON format into the given file. - #[arg(long, conflicts_with = "json_output")] + #[clap(long, conflicts_with = "json-output")] pub json_file: Option, /// Don't print the median-slopes linear regression analysis. - #[arg(long)] + #[clap(long)] pub no_median_slopes: bool, /// Don't print the min-squares linear regression analysis. - #[arg(long)] + #[clap(long)] pub no_min_squares: bool, /// Output the benchmarks to a Rust file at the given path. - #[arg(long)] + #[clap(long)] pub output: Option, /// Add a header file to your outputted benchmarks. 
- #[arg(long)] + #[clap(long)] pub header: Option, /// Path to Handlebars template file used for outputting benchmark results. (Optional) - #[arg(long)] + #[clap(long)] pub template: Option, #[allow(missing_docs)] @@ -100,25 +100,25 @@ pub struct PalletCmd { /// * min-squares (default) /// * median-slopes /// * max (max of min squares and median slopes for each value) - #[arg(long)] + #[clap(long)] pub output_analysis: Option, /// Set the heap pages while running benchmarks. If not set, the default value from the client /// is used. - #[arg(long)] + #[clap(long)] pub heap_pages: Option, /// Disable verification logic when running benchmarks. - #[arg(long)] + #[clap(long)] pub no_verify: bool, /// Display and run extra benchmarks that would otherwise not be needed for weight /// construction. - #[arg(long)] + #[clap(long)] pub extra: bool, /// Estimate PoV size. - #[arg(long)] + #[clap(long)] pub record_proof: bool, #[allow(missing_docs)] @@ -126,50 +126,44 @@ pub struct PalletCmd { pub shared_params: sc_cli::SharedParams, /// The execution strategy that should be used for benchmarks. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] pub execution: Option, /// Method for executing Wasm runtime code. - #[arg( + #[clap( long = "wasm-execution", value_name = "METHOD", - value_enum, + possible_values = WasmExecutionMethod::variants(), ignore_case = true, - default_value_t = DEFAULT_WASM_EXECUTION_METHOD, + default_value = DEFAULT_WASM_EXECUTION_METHOD, )] pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. /// /// Only has an effect when `wasm-execution` is set to `compiled`. 
- #[arg( + #[clap( long = "wasm-instantiation-strategy", value_name = "STRATEGY", default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - value_enum, + arg_enum, )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Limit the memory the database cache can use. - #[arg(long = "db-cache", value_name = "MiB", default_value_t = 1024)] + #[clap(long = "db-cache", value_name = "MiB", default_value = "1024")] pub database_cache_size: u32, /// List the benchmarks that match your query rather than running them. /// /// When nothing is provided, we list all benchmarks. - #[arg(long)] + #[clap(long)] pub list: bool, /// If enabled, the storage info is not displayed in the output next to the analysis. /// /// This is independent of the storage info appearing in the *output file*. Use a Handlebar /// template for that purpose. - #[arg(long)] + #[clap(long)] pub no_storage_info: bool, - - /// A path to a `.json` file with existing benchmark results generated with `--json` or - /// `--json-file`. When specified the benchmarks are not actually executed, and the data for - /// the analysis is read from this file. - #[arg(long)] - pub json_input: Option, } diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index 7e2e0688d654f..688ad4d3934f5 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -33,23 +33,22 @@ impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. 
- Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) + ({{underscore benchmark.base_weight}} as Weight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as u64).saturating_mul({{cw.name}} as u64)) + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as u64)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as u64).saturating_mul({{cr.name}} as u64))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as u64)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as u64).saturating_mul({{cw.name}} as u64))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) {{/each}} } {{/each}} diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index a52bbcd229cb1..42a237fcf3ce3 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -24,12 +24,12 @@ use std::{ }; use inflector::Inflector; -use itertools::Itertools; use serde::Serialize; use crate::{pallet::command::ComponentRange, shared::UnderscoreHelper, PalletCmd}; use frame_benchmarking::{ Analysis, AnalysisChoice, BenchmarkBatchSplitResults, BenchmarkResult, BenchmarkSelector, + RegressionModel, }; use 
frame_support::traits::StorageInfo; use sp_core::hexdisplay::HexDisplay; @@ -69,8 +69,6 @@ struct BenchmarkData { component_writes: Vec, component_ranges: Vec, comments: Vec, - #[serde(serialize_with = "string_serialize")] - min_execution_time: u128, } // This forwards some specific metadata from the `PalletCmd` @@ -147,15 +145,13 @@ fn map_results( Ok(all_benchmarks) } -// Get an iterator of errors. -fn extract_errors(errors: &Option>) -> impl Iterator + '_ { - errors - .as_ref() - .map(|e| e.as_slice()) - .unwrap_or(&[]) - .iter() - .copied() - .chain(std::iter::repeat(0)) +// Get an iterator of errors from a model. If the model is `None` all errors are zero. +fn extract_errors(model: &Option) -> impl Iterator + '_ { + let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter()); + std::iter::from_fn(move || match &mut errors { + Some(model) => model.next().map(|val| *val as u128), + _ => Some(0), + }) } // Analyze and return the relevant results for a given benchmark. @@ -194,20 +190,24 @@ fn get_benchmark_data( .slopes .into_iter() .zip(extrinsic_time.names.iter()) - .zip(extract_errors(&extrinsic_time.errors)) + .zip(extract_errors(&extrinsic_time.model)) .for_each(|((slope, name), error)| { if !slope.is_zero() { if !used_components.contains(&name) { used_components.push(name); } - used_extrinsic_time.push(ComponentSlope { name: name.clone(), slope, error }); + used_extrinsic_time.push(ComponentSlope { + name: name.clone(), + slope: slope.saturating_mul(1000), + error: error.saturating_mul(1000), + }); } }); reads .slopes .into_iter() .zip(reads.names.iter()) - .zip(extract_errors(&reads.errors)) + .zip(extract_errors(&reads.model)) .for_each(|((slope, name), error)| { if !slope.is_zero() { if !used_components.contains(&name) { @@ -220,7 +220,7 @@ fn get_benchmark_data( .slopes .into_iter() .zip(writes.names.iter()) - .zip(extract_errors(&writes.errors)) + .zip(extract_errors(&writes.model)) .for_each(|((slope, name), error)| { if !slope.is_zero() { 
if !used_components.contains(&name) { @@ -251,7 +251,7 @@ fn get_benchmark_data( BenchmarkData { name: String::from_utf8(batch.benchmark.clone()).unwrap(), components, - base_weight: extrinsic_time.base, + base_weight: extrinsic_time.base.saturating_mul(1000), base_reads: reads.base, base_writes: writes.base, component_weight: used_extrinsic_time, @@ -259,7 +259,6 @@ fn get_benchmark_data( component_writes: used_writes, component_ranges, comments, - min_execution_time: extrinsic_time.minimum, } } @@ -318,21 +317,18 @@ pub(crate) fn write_results( // Organize results by pallet into a JSON map let all_results = map_results(batches, storage_info, component_ranges, &analysis_choice)?; - let mut created_files = Vec::new(); - for ((pallet, instance), results) in all_results.iter() { let mut file_path = path.clone(); // If a user only specified a directory... if file_path.is_dir() { - // Start with "path/to/pallet_name". - let mut file_name = pallet.clone(); // Check if there might be multiple instances benchmarked. if all_results.keys().any(|(p, i)| p == pallet && i != instance) { - // Append "_instance_name". - file_name = format!("{}_{}", file_name, instance.to_snake_case()); + // Create new file: "path/to/pallet_name_instance_name.rs". + file_path.push(pallet.clone() + "_" + instance.to_snake_case().as_str()); + } else { + // Create new file: "path/to/pallet_name.rs". + file_path.push(pallet.clone()); } - // "mod::pallet_name.rs" becomes "mod_pallet_name.rs". 
- file_path.push(file_name.replace("::", "_")); file_path.set_extension("rs"); } @@ -349,18 +345,10 @@ pub(crate) fn write_results( benchmarks: results.clone(), }; - let mut output_file = fs::File::create(&file_path)?; + let mut output_file = fs::File::create(file_path)?; handlebars .render_template_to_write(&template, &hbs_data, &mut output_file) .map_err(|e| io_error(&e.to_string()))?; - println!("Created file: {:?}", &file_path); - created_files.push(file_path); - } - - for file in created_files.iter().duplicates() { - // This can happen when there are multiple instances of a pallet deployed - // and `--output` forces the output of all instances into the same file. - println!("Multiple benchmarks were written to the same file: {:?}.", file); } Ok(()) } diff --git a/utils/frame/benchmarking-cli/src/shared/README.md b/utils/frame/benchmarking-cli/src/shared/README.md index 08e25b0e08f76..2a3719b85498c 100644 --- a/utils/frame/benchmarking-cli/src/shared/README.md +++ b/utils/frame/benchmarking-cli/src/shared/README.md @@ -11,6 +11,5 @@ Contains code that is shared among multiple sub-commands. - `--db` The database backend to use. This depends on your snapshot. - `--pruning` Set the pruning mode of the node. Some benchmarks require you to set this to `archive`. - `--base-path` The location on the disk that should be used for the benchmarks. You can try this on different disks or even on a mounted RAM-disk. It is important to use the same location that will later-on be used to store the chain data to get the correct results. -- `--header` Optional file header which will be prepended to the weight output file. Can be used for adding LICENSE headers. 
License: Apache-2.0 diff --git a/utils/frame/benchmarking-cli/src/shared/mod.rs b/utils/frame/benchmarking-cli/src/shared/mod.rs index ea5415f33f020..33189792c4008 100644 --- a/utils/frame/benchmarking-cli/src/shared/mod.rs +++ b/utils/frame/benchmarking-cli/src/shared/mod.rs @@ -95,22 +95,22 @@ pub fn check_build_profile() -> Result<(), String> { /// Parameters to configure how the host info will be determined. #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] -#[command(rename_all = "kebab-case")] +#[clap(rename_all = "kebab-case")] pub struct HostInfoParams { /// Manually override the hostname to use. - #[arg(long)] + #[clap(long)] pub hostname_override: Option, /// Specify a fallback hostname if no-one could be detected automatically. /// /// Note: This only exists to make the `hostname` function infallible. - #[arg(long, default_value = "")] + #[clap(long, default_value = "")] pub hostname_fallback: String, /// Specify a fallback CPU name if no-one could be detected automatically. /// /// Note: This only exists to make the `cpuname` function infallible. - #[arg(long, default_value = "")] + #[clap(long, default_value = "")] pub cpuname_fallback: String, } diff --git a/utils/frame/benchmarking-cli/src/shared/stats.rs b/utils/frame/benchmarking-cli/src/shared/stats.rs index ffae4a17724f8..3234d5f2f94f7 100644 --- a/utils/frame/benchmarking-cli/src/shared/stats.rs +++ b/utils/frame/benchmarking-cli/src/shared/stats.rs @@ -112,7 +112,7 @@ impl Stats { /// Returns the specified percentile for the given data. /// This is best effort since it ignores the interpolation case. 
fn percentile(mut xs: Vec, p: f64) -> u64 { - xs.sort(); + xs.sort_unstable(); let index = (xs.len() as f64 * p).ceil() as usize - 1; xs[index.clamp(0, xs.len() - 1)] } diff --git a/utils/frame/benchmarking-cli/src/shared/weight_params.rs b/utils/frame/benchmarking-cli/src/shared/weight_params.rs index 030bbfa00d468..4dd80cd41ff3d 100644 --- a/utils/frame/benchmarking-cli/src/shared/weight_params.rs +++ b/utils/frame/benchmarking-cli/src/shared/weight_params.rs @@ -31,23 +31,23 @@ pub struct WeightParams { /// File or directory to write the *weight* files to. /// /// For Substrate this should be `frame/support/src/weights`. - #[arg(long)] + #[clap(long)] pub weight_path: Option, /// Select a specific metric to calculate the final weight output. - #[arg(long = "metric", default_value = "average")] + #[clap(long = "metric", default_value = "average")] pub weight_metric: StatSelect, /// Multiply the resulting weight with the given factor. Must be positive. /// /// Is applied before `weight_add`. - #[arg(long = "mul", default_value_t = 1.0)] + #[clap(long = "mul", default_value = "1")] pub weight_mul: f64, /// Add the given offset to the resulting weight. /// /// Is applied after `weight_mul`. - #[arg(long = "add", default_value_t = 0)] + #[clap(long = "add", default_value = "0")] pub weight_add: u64, } diff --git a/utils/frame/benchmarking-cli/src/storage/README.md b/utils/frame/benchmarking-cli/src/storage/README.md index ecaf4edadab38..820785f7ea20c 100644 --- a/utils/frame/benchmarking-cli/src/storage/README.md +++ b/utils/frame/benchmarking-cli/src/storage/README.md @@ -97,7 +97,6 @@ write: 71_347 * constants::WEIGHT_PER_NANOS, - [`--weight-path`](../shared/README.md#arguments) - `--json-read-path` Write the raw 'read' results to this file or directory. - `--json-write-path` Write the raw 'write' results to this file or directory. 
-- [`--header`](../shared/README.md#arguments) License: Apache-2.0 diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index ce2d52e57d641..b8264dc009232 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -24,7 +24,7 @@ use sp_core::storage::StorageKey; use sp_database::{ColumnId, Database}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_state_machine::Storage; -use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion}; +use sp_storage::StateVersion; use clap::{Args, Parser}; use log::info; @@ -68,55 +68,37 @@ pub struct StorageParams { pub hostinfo: HostInfoParams, /// Skip the `read` benchmark. - #[arg(long)] + #[clap(long)] pub skip_read: bool, /// Skip the `write` benchmark. - #[arg(long)] + #[clap(long)] pub skip_write: bool, /// Specify the Handlebars template to use for outputting benchmark results. - #[arg(long)] + #[clap(long)] pub template_path: Option, - /// Add a header to the generated weight output file. - /// - /// Good for adding LICENSE headers. - #[arg(long, value_name = "PATH")] - pub header: Option, - /// Path to write the raw 'read' results in JSON format to. Can be a file or directory. - #[arg(long)] + #[clap(long)] pub json_read_path: Option, /// Path to write the raw 'write' results in JSON format to. Can be a file or directory. - #[arg(long)] + #[clap(long)] pub json_write_path: Option, /// Rounds of warmups before measuring. - #[arg(long, default_value_t = 1)] + #[clap(long, default_value = "1")] pub warmups: u32, /// The `StateVersion` to use. Substrate `--dev` should use `V1` and Polkadot `V0`. /// Selecting the wrong version can corrupt the DB. - #[arg(long, value_parser = clap::value_parser!(u8).range(0..=1))] + #[clap(long, possible_values = ["0", "1"])] pub state_version: u8, - /// Trie cache size in bytes. - /// - /// Providing `0` will disable the cache. 
- #[arg(long, value_name = "Bytes", default_value_t = 67108864)] - pub trie_cache_size: usize, - - /// Enable the Trie cache. - /// - /// This should only be used for performance analysis and not for final results. - #[arg(long)] - pub enable_trie_cache: bool, - - /// Include child trees in benchmark. - #[arg(long)] - pub include_child_trees: bool, + /// State cache size. + #[clap(long, default_value = "0")] + pub state_cache_size: usize, } impl StorageCmd { @@ -134,7 +116,7 @@ impl StorageCmd { Block: BlockT, C: UsageProvider + StorageProvider + HeaderBackend, { - let mut template = TemplateData::new(&cfg, &self.params)?; + let mut template = TemplateData::new(&cfg, &self.params); let block_id = BlockId::::Number(client.usage_info().chain.best_number); template.set_block_number(block_id.to_string()); @@ -173,16 +155,6 @@ impl StorageCmd { } } - /// Returns Some if child node and None if regular - pub(crate) fn is_child_key(&self, key: Vec) -> Option { - if let Some((ChildType::ParentKeyId, storage_key)) = - ChildType::from_prefixed_key(&PrefixedStorageKey::new(key)) - { - return Some(ChildInfo::new_default(storage_key)) - } - None - } - /// Run some rounds of the (read) benchmark as warmup. /// See `frame_benchmarking_cli::storage::read::bench_read` for detailed comments. 
fn bench_warmup(&self, client: &Arc) -> Result<()> @@ -191,17 +163,17 @@ impl StorageCmd { B: BlockT + Debug, BA: ClientBackend, { - let hash = client.usage_info().chain.best_hash; + let block = BlockId::Number(client.usage_info().chain.best_number); let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(hash, &empty_prefix)?; + let mut keys = client.storage_keys(&block, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); for i in 0..self.params.warmups { info!("Warmup round {}/{}", i + 1, self.params.warmups); - for key in keys.as_slice() { + for key in keys.clone() { let _ = client - .storage(hash, &key) + .storage(&block, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty"); } @@ -225,11 +197,7 @@ impl CliConfiguration for StorageCmd { Some(&self.pruning_params) } - fn trie_cache_maximum_size(&self) -> Result> { - if self.params.enable_trie_cache && self.params.trie_cache_size > 0 { - Ok(Some(self.params.trie_cache_size)) - } else { - Ok(None) - } + fn state_cache_size(&self) -> Result { + Ok(self.params.state_cache_size) } } diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index 20c41e4a5196b..c1dc6daba0953 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -18,7 +18,10 @@ use sc_cli::Result; use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; use sp_core::storage::StorageKey; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; use log::info; use rand::prelude::*; @@ -38,52 +41,25 @@ impl StorageCmd { <::Header as HeaderT>::Number: From, { let mut record = BenchRecord::default(); - let best_hash = client.usage_info().chain.best_hash; + let block = BlockId::Number(client.usage_info().chain.best_number); - info!("Preparing keys from 
block {}", best_hash); + info!("Preparing keys from block {}", block); // Load all keys and randomly shuffle them. let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(best_hash, &empty_prefix)?; + let mut keys = client.storage_keys(&block, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); - let mut child_nodes = Vec::new(); // Interesting part here: // Read all the keys in the database and measure the time it takes to access each. info!("Reading {} keys", keys.len()); - for key in keys.as_slice() { - match (self.params.include_child_trees, self.is_child_key(key.clone().0)) { - (true, Some(info)) => { - // child tree key - let child_keys = client.child_storage_keys(best_hash, &info, &empty_prefix)?; - for ck in child_keys { - child_nodes.push((ck.clone(), info.clone())); - } - }, - _ => { - // regular key - let start = Instant::now(); - let v = client - .storage(best_hash, &key) - .expect("Checked above to exist") - .ok_or("Value unexpectedly empty")?; - record.append(v.0.len(), start.elapsed())?; - }, - } - } - - if self.params.include_child_trees { - child_nodes.shuffle(&mut rng); - - info!("Reading {} child keys", child_nodes.len()); - for (key, info) in child_nodes.as_slice() { - let start = Instant::now(); - let v = client - .child_storage(best_hash, info, key) - .expect("Checked above to exist") - .ok_or("Value unexpectedly empty")?; - record.append(v.0.len(), start.elapsed())?; - } + for key in keys.clone() { + let start = Instant::now(); + let v = client + .storage(&block, &key) + .expect("Checked above to exist") + .ok_or("Value unexpectedly empty")?; + record.append(v.0.len(), start.elapsed())?; } Ok(record) } diff --git a/utils/frame/benchmarking-cli/src/storage/template.rs b/utils/frame/benchmarking-cli/src/storage/template.rs index ebc415ccb8189..20fbd58134f20 100644 --- a/utils/frame/benchmarking-cli/src/storage/template.rs +++ b/utils/frame/benchmarking-cli/src/storage/template.rs @@ -45,8 +45,6 @@ 
pub(crate) struct TemplateData { hostname: String, /// CPU name of the machine that executed the benchmarks. cpuname: String, - /// Header for the generated file. - header: String, /// Command line arguments that were passed to the CLI. args: Vec, /// Storage params of the executed command. @@ -65,26 +63,18 @@ pub(crate) struct TemplateData { impl TemplateData { /// Returns a new [`Self`] from the given configuration. - pub fn new(cfg: &Configuration, params: &StorageParams) -> Result { - let header = params - .header - .as_ref() - .map(|p| std::fs::read_to_string(p)) - .transpose()? - .unwrap_or_default(); - - Ok(TemplateData { + pub fn new(cfg: &Configuration, params: &StorageParams) -> Self { + TemplateData { db_name: format!("{}", cfg.database), runtime_name: cfg.chain_spec.name().into(), version: VERSION.into(), date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(), hostname: params.hostinfo.hostname(), cpuname: params.hostinfo.cpuname(), - header, args: env::args().collect::>(), params: params.clone(), ..Default::default() - }) + } } /// Sets the stats and calculates the final weights. diff --git a/utils/frame/benchmarking-cli/src/storage/weights.hbs b/utils/frame/benchmarking-cli/src/storage/weights.hbs index 82e581cf990c8..8c19aaa0dff36 100644 --- a/utils/frame/benchmarking-cli/src/storage/weights.hbs +++ b/utils/frame/benchmarking-cli/src/storage/weights.hbs @@ -1,4 +1,20 @@ -{{header}} +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}} //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` @@ -17,9 +33,10 @@ /// Storage DB weights for the `{{runtime_name}}` runtime and `{{db_name}}`. pub mod constants { - use frame_support::weights::constants; - use sp_core::parameter_types; - use sp_weights::RuntimeDbWeight; + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; parameter_types! { {{#if (eq db_name "ParityDb")}} @@ -65,7 +82,7 @@ pub mod constants { #[cfg(test)] mod test_db_weights { use super::constants::{{db_name}}Weight as W; - use sp_weights::constants; + use frame_support::weights::constants; /// Checks that all weights exist and have sane values. // NOTE: If this test fails but you are sure that the generated values are fine, @@ -74,20 +91,20 @@ pub mod constants { fn bound() { // At least 1 µs. assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().reads(1) >= constants::WEIGHT_PER_MICROS, "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), + W::get().writes(1) >= constants::WEIGHT_PER_MICROS, "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().reads(1) <= constants::WEIGHT_PER_MILLIS, "Read weight should be at most 1 ms." 
); assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + W::get().writes(1) <= constants::WEIGHT_PER_MILLIS, "Write weight should be at most 1 ms." ); } diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 55a7b60d55552..ab25109a35d49 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -16,8 +16,8 @@ // limitations under the License. use sc_cli::Result; -use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; -use sc_client_db::{DbHash, DbState, DbStateBuilder}; +use sc_client_api::UsageProvider; +use sc_client_db::{DbHash, DbState}; use sp_api::StateBackend; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; @@ -29,12 +29,7 @@ use sp_trie::PrefixedMemoryDB; use log::{info, trace}; use rand::prelude::*; -use sp_storage::{ChildInfo, StateVersion}; -use std::{ - fmt::Debug, - sync::Arc, - time::{Duration, Instant}, -}; +use std::{fmt::Debug, sync::Arc, time::Instant}; use super::cmd::StorageCmd; use crate::shared::{new_rng, BenchRecord}; @@ -42,7 +37,7 @@ use crate::shared::{new_rng, BenchRecord}; impl StorageCmd { /// Benchmarks the time it takes to write a single Storage item. /// Uses the latest state that is available for the given client. - pub(crate) fn bench_write( + pub(crate) fn bench_write( &self, client: Arc, (db, state_col): (Arc>, ColumnId), @@ -51,112 +46,65 @@ impl StorageCmd { where Block: BlockT
+ Debug, H: HeaderT, - BA: ClientBackend, - C: UsageProvider + HeaderBackend + StorageProvider, + C: UsageProvider + HeaderBackend, { // Store the time that it took to write each value. let mut record = BenchRecord::default(); - let best_hash = client.usage_info().chain.best_hash; - let header = client.header(BlockId::Hash(best_hash))?.ok_or("Header not found")?; + let block = BlockId::Number(client.usage_info().chain.best_number); + let header = client.header(block)?.ok_or("Header not found")?; let original_root = *header.state_root(); - let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); + let trie = DbState::::new(storage.clone(), original_root); - info!("Preparing keys from block {}", best_hash); + info!("Preparing keys from block {}", block); // Load all KV pairs and randomly shuffle them. let mut kvs = trie.pairs(); let (mut rng, _) = new_rng(None); kvs.shuffle(&mut rng); - info!("Writing {} keys", kvs.len()); - - let mut child_nodes = Vec::new(); // Generate all random values first; Make sure there are no collisions with existing // db entries, so we can rollback all additions without corrupting existing entries. - for (k, original_v) in kvs { - match (self.params.include_child_trees, self.is_child_key(k.to_vec())) { - (true, Some(info)) => { - let child_keys = - client.child_storage_keys_iter(best_hash, info.clone(), None, None)?; - for ck in child_keys { - child_nodes.push((ck.clone(), info.clone())); - } - }, - _ => { - // regular key - let mut new_v = vec![0; original_v.len()]; - loop { - // Create a random value to overwrite with. - // NOTE: We use a possibly higher entropy than the original value, - // could be improved but acts as an over-estimation which is fine for now. - rng.fill_bytes(&mut new_v[..]); - if check_new_value::( - db.clone(), - &trie, - &k.to_vec(), - &new_v, - self.state_version(), - state_col, - None, - ) { - break - } - } - - // Write each value in one commit. 
- let (size, duration) = measure_write::( - db.clone(), - &trie, - k.to_vec(), - new_v.to_vec(), - self.state_version(), - state_col, - None, - )?; - record.append(size, duration)?; - }, - } - } - - if self.params.include_child_trees { - child_nodes.shuffle(&mut rng); - info!("Writing {} child keys", child_nodes.len()); - - for (key, info) in child_nodes { - if let Some(original_v) = client - .child_storage(best_hash, &info.clone(), &key) - .expect("Checked above to exist") - { - let mut new_v = vec![0; original_v.0.len()]; - loop { - rng.fill_bytes(&mut new_v[..]); - if check_new_value::( - db.clone(), - &trie, - &key.0, - &new_v, - self.state_version(), - state_col, - Some(&info), - ) { - break + for (k, original_v) in kvs.iter_mut() { + 'retry: loop { + let mut new_v = vec![0; original_v.len()]; + // Create a random value to overwrite with. + // NOTE: We use a possibly higher entropy than the original value, + // could be improved but acts as an over-estimation which is fine for now. + rng.fill_bytes(&mut new_v[..]); + let new_kv = vec![(k.as_ref(), Some(new_v.as_ref()))]; + let (_, mut stx) = trie.storage_root(new_kv.iter().cloned(), self.state_version()); + for (mut k, (_, rc)) in stx.drain().into_iter() { + if rc > 0 { + db.sanitize_key(&mut k); + if db.get(state_col, &k).is_some() { + trace!("Benchmark-store key creation: Key collision detected, retry"); + continue 'retry } } - - let (size, duration) = measure_write::( - db.clone(), - &trie, - key.0, - new_v.to_vec(), - self.state_version(), - state_col, - Some(&info), - )?; - record.append(size, duration)?; } + *original_v = new_v; + break } } + info!("Writing {} keys", kvs.len()); + // Write each value in one commit. + for (k, new_v) in kvs.iter() { + // Interesting part here: + let start = Instant::now(); + // Create a TX that will modify the Trie in the DB and + // calculate the root hash of the Trie after the modification. 
+ let replace = vec![(k.as_ref(), Some(new_v.as_ref()))]; + let (_, stx) = trie.storage_root(replace.iter().cloned(), self.state_version()); + // Only the keep the insertions, since we do not want to benchmark pruning. + let tx = convert_tx::(db.clone(), stx.clone(), false, state_col); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + record.append(new_v.len(), start.elapsed())?; + + // Now undo the changes by removing what was added. + let tx = convert_tx::(db.clone(), stx.clone(), true, state_col); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + } Ok(record) } } @@ -186,62 +134,3 @@ fn convert_tx( } ret } - -/// Measures write benchmark -/// if `child_info` exist then it means this is a child tree key -fn measure_write( - db: Arc>, - trie: &DbState, - key: Vec, - new_v: Vec, - version: StateVersion, - col: ColumnId, - child_info: Option<&ChildInfo>, -) -> Result<(usize, Duration)> { - let start = Instant::now(); - // Create a TX that will modify the Trie in the DB and - // calculate the root hash of the Trie after the modification. - let replace = vec![(key.as_ref(), Some(new_v.as_ref()))]; - let stx = match child_info { - Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2, - None => trie.storage_root(replace.iter().cloned(), version).1, - }; - // Only the keep the insertions, since we do not want to benchmark pruning. - let tx = convert_tx::(db.clone(), stx.clone(), false, col); - db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; - let result = (new_v.len(), start.elapsed()); - - // Now undo the changes by removing what was added. 
- let tx = convert_tx::(db.clone(), stx.clone(), true, col); - db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; - Ok(result) -} - -/// Checks if a new value causes any collision in tree updates -/// returns true if there is no collision -/// if `child_info` exist then it means this is a child tree key -fn check_new_value( - db: Arc>, - trie: &DbState, - key: &Vec, - new_v: &Vec, - version: StateVersion, - col: ColumnId, - child_info: Option<&ChildInfo>, -) -> bool { - let new_kv = vec![(key.as_ref(), Some(new_v.as_ref()))]; - let mut stx = match child_info { - Some(info) => trie.child_storage_root(info, new_kv.iter().cloned(), version).2, - None => trie.storage_root(new_kv.iter().cloned(), version).1, - }; - for (mut k, (_, rc)) in stx.drain().into_iter() { - if rc > 0 { - db.sanitize_key(&mut k); - if db.get(col, &k).is_some() { - trace!("Benchmark-store key creation: Key collision detected, retry"); - return false - } - } - } - true -} diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index 89e9ee79db214..f01e2f1a5d51f 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -11,7 +11,7 @@ documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } diff --git a/utils/frame/frame-utilities-cli/src/pallet_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs index 2a80e3a3d312d..4ad82a01c2433 100644 --- a/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -28,30 +28,31 @@ use sp_runtime::traits::AccountIdConversion; /// The `palletid` 
command #[derive(Debug, Parser)] -#[command(name = "palletid", about = "Inspect a module ID address")] +#[clap(name = "palletid", about = "Inspect a module ID address")] pub struct PalletIdCmd { /// The module ID used to derive the account id: String, /// network address format - #[arg( + #[clap( long, value_name = "NETWORK", - value_parser = sc_cli::parse_ss58_address_format, + possible_values = &Ss58AddressFormat::all_names()[..], + parse(try_from_str = Ss58AddressFormat::try_from), ignore_case = true, )] pub network: Option, #[allow(missing_docs)] - #[command(flatten)] + #[clap(flatten)] pub output_scheme: OutputTypeFlag, #[allow(missing_docs)] - #[command(flatten)] + #[clap(flatten)] pub crypto_scheme: CryptoSchemeFlag, #[allow(missing_docs)] - #[command(flatten)] + #[clap(flatten)] pub keystore_params: KeystoreParams, } diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index b8ad97cc6b6fa..34d62ab0d8b5f 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -12,7 +12,7 @@ readme = "README.md" [dependencies] # FRAME frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } -frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support", features = ["runtime-benchmarks"] } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-staking = { version = "4.0.0-dev", path = "../../../frame/staking" } @@ -22,4 +22,4 @@ sp-io = { version = "6.0.0", path = "../../../primitives/io" } # third party chrono = { version = "0.4.19" } git2 = { version = "0.14.2", default-features = false } -num-format = "0.4.3" +num-format = { version = "0.4.0" } diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml index 6cc14a0595501..5af7dd78a08e8 100644 
--- a/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -14,4 +14,4 @@ kitchensink-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runt generate-bags = { version = "4.0.0-dev", path = "../" } # third-party -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } diff --git a/utils/frame/generate-bags/node-runtime/src/main.rs b/utils/frame/generate-bags/node-runtime/src/main.rs index 27e51b205f8ce..5ea1262d95d34 100644 --- a/utils/frame/generate-bags/node-runtime/src/main.rs +++ b/utils/frame/generate-bags/node-runtime/src/main.rs @@ -25,19 +25,19 @@ use std::path::PathBuf; // #[clap(author, version, about)] struct Opt { /// How many bags to generate. - #[arg(long, default_value_t = 200)] + #[clap(long, default_value = "200")] n_bags: usize, /// Where to write the output. output: PathBuf, /// The total issuance of the currency used to create `VoteWeight`. - #[arg(short, long)] + #[clap(short, long)] total_issuance: u128, /// The minimum account balance (i.e. existential deposit) for the currency used to create /// `VoteWeight`. - #[arg(short, long)] + #[clap(short, long)] minimum_balance: u128, } diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs index 23da131a668d8..d4507c3be33ef 100644 --- a/utils/frame/generate-bags/src/lib.rs +++ b/utils/frame/generate-bags/src/lib.rs @@ -207,10 +207,6 @@ pub fn generate_thresholds( writeln!(buf, "//! Autogenerated bag thresholds.")?; writeln!(buf, "//!")?; writeln!(buf, "//! Generated on {}", now.to_rfc3339())?; - writeln!(buf, "//! Arguments")?; - writeln!(buf, "//! Total issuance: {}", &total_issuance)?; - writeln!(buf, "//! Minimum balance: {}", &minimum_balance)?; - writeln!( buf, "//! 
for the {} runtime.", @@ -238,17 +234,6 @@ pub fn generate_thresholds( writeln!(buf)?; writeln!(buf, "/// Upper thresholds delimiting the bag list.")?; writeln!(buf, "pub const THRESHOLDS: [u64; {}] = [", thresholds.len())?; - for threshold in &thresholds { - num_buf.write_formatted(threshold, &format); - // u64::MAX, with spacers every 3 digits, is 26 characters wide - writeln!(buf, " {:>26},", num_buf.as_str())?; - } - writeln!(buf, "];")?; - - // thresholds balance - writeln!(buf)?; - writeln!(buf, "/// Upper thresholds delimiting the bag list.")?; - writeln!(buf, "pub const THRESHOLDS_BALANCES: [u128; {}] = [", thresholds.len())?; for threshold in thresholds { num_buf.write_formatted(&threshold, &format); // u64::MAX, with spacers every 3 digits, is 26 characters wide diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 3d7471bf4d680..3121157df68d8 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "An externalities provided environment that can load itself from remote nodes or cached files" +description = "An externalities provided environemnt that can load itself from remote nodes or cache files" readme = "README.md" [package.metadata.docs.rs] @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } env_logger = "0.9" +jsonrpsee = { version = "0.15.1", features = ["ws-client", "macros"] } log = "0.4.17" serde = "1.0.136" serde_json = "1.0" @@ -23,7 +24,6 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-io = { version = "6.0.0", path = "../../../primitives/io" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-version = { version = "5.0.0", path = 
"../../../primitives/version" } -substrate-rpc-client = { path = "../rpc/client" } [dev-dependencies] tokio = { version = "1.17.0", features = ["macros", "rt-multi-thread"] } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 86cfc767bf3b5..202560f18cf84 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -22,6 +22,13 @@ use codec::{Decode, Encode}; +use jsonrpsee::{ + core::{client::ClientT, Error as RpcError}, + proc_macros::rpc, + rpc_params, + ws_client::{WsClient, WsClientBuilder}, +}; + use log::*; use serde::de::DeserializeOwned; use sp_core::{ @@ -39,7 +46,8 @@ use std::{ path::{Path, PathBuf}, sync::Arc, }; -use substrate_rpc_client::{rpc_params, ws_client, ChainApi, ClientT, StateApi, WsClient}; + +pub mod rpc_api; type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec; @@ -48,7 +56,41 @@ type ChildKeyValues = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io:443"; const BATCH_SIZE: usize = 1000; -const PAGE: u32 = 1000; +const PAGE: u32 = 512; + +#[rpc(client)] +pub trait RpcApi { + #[method(name = "childstate_getKeys")] + fn child_get_keys( + &self, + child_key: PrefixedStorageKey, + prefix: StorageKey, + hash: Option, + ) -> Result, RpcError>; + + #[method(name = "childstate_getStorage")] + fn child_get_storage( + &self, + child_key: PrefixedStorageKey, + prefix: StorageKey, + hash: Option, + ) -> Result; + + #[method(name = "state_getStorage")] + fn get_storage(&self, prefix: StorageKey, hash: Option) -> Result; + + #[method(name = "state_getKeysPaged")] + fn get_keys_paged( + &self, + prefix: Option, + count: u32, + start_key: Option, + hash: Option, + ) -> Result, RpcError>; + + #[method(name = "chain_getFinalizedHead")] + fn finalized_head(&self) -> Result; +} /// The execution mode. 
#[derive(Clone)] @@ -98,10 +140,14 @@ impl Transport { if let Self::Uri(uri) = self { log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri); - let ws_client = ws_client(uri).await.map_err(|e| { - log::error!(target: LOG_TARGET, "error: {:?}", e); - "failed to build ws client" - })?; + let ws_client = WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(&uri) + .await + .map_err(|e| { + log::error!(target: LOG_TARGET, "error: {:?}", e); + "failed to build ws client" + })?; *self = Self::RemoteClient(Arc::new(ws_client)) } @@ -212,7 +258,7 @@ pub struct Builder { // NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for // that. -impl Default for Builder { +impl Default for Builder { fn default() -> Self { Self { mode: Default::default(), @@ -226,7 +272,7 @@ impl Default for Builder { } // Mode methods -impl Builder { +impl Builder { fn as_online(&self) -> &OnlineConfig { match &self.mode { Mode::Online(config) => config, @@ -245,38 +291,26 @@ impl Builder { } // RPC methods -impl Builder -where - B::Hash: DeserializeOwned, - B::Header: DeserializeOwned, -{ +impl Builder { async fn rpc_get_storage( &self, key: StorageKey, maybe_at: Option, ) -> Result { trace!(target: LOG_TARGET, "rpc: get_storage"); - match self.as_online().rpc_client().storage(key, maybe_at).await { - Ok(Some(res)) => Ok(res), - Ok(None) => Err("get_storage not found"), - Err(e) => { - error!(target: LOG_TARGET, "Error = {:?}", e); - Err("rpc get_storage failed.") - }, - } + self.as_online().rpc_client().get_storage(key, maybe_at).await.map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc get_storage failed." + }) } /// Get the latest finalized head. async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); - - // sadly this pretty much unreadable... 
- ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client()) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc finalized_head failed." - }) + self.as_online().rpc_client().finalized_head().await.map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc finalized_head failed." + }) } /// Get all the keys at `prefix` at `hash` using the paged, safe RPC methods. @@ -291,7 +325,7 @@ where let page = self .as_online() .rpc_client() - .storage_keys_paged(Some(prefix.clone()), PAGE, last_key.clone(), Some(at)) + .get_keys_paged(Some(prefix.clone()), PAGE, last_key.clone(), Some(at)) .await .map_err(|e| { error!(target: LOG_TARGET, "Error = {:?}", e); @@ -437,19 +471,19 @@ where child_prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { - // This is deprecated and will generate a warning which causes the CI to fail. - #[allow(warnings)] - let child_keys = substrate_rpc_client::ChildStateApi::storage_keys( - self.as_online().rpc_client(), - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix, - Some(at), - ) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + let child_keys = self + .as_online() + .rpc_client() + .child_get_keys( + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + child_prefix, + Some(at), + ) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." + })?; debug!( target: LOG_TARGET, @@ -463,11 +497,7 @@ where } // Internal methods -impl Builder -where - B::Hash: DeserializeOwned, - B::Header: DeserializeOwned, -{ +impl Builder { /// Save the given data to the top keys snapshot. 
fn save_top_snapshot(&self, data: &[KeyValue], path: &PathBuf) -> Result<(), &'static str> { let mut path = path.clone(); @@ -696,13 +726,12 @@ where let child_kv = match self.mode.clone() { Mode::Online(_) => self.load_child_remote_and_maybe_save(&top_kv).await?, - Mode::OfflineOrElseOnline(offline_config, _) => { + Mode::OfflineOrElseOnline(offline_config, _) => if let Ok(kv) = self.load_child_snapshot(&offline_config.state_snapshot.path) { kv } else { self.load_child_remote_and_maybe_save(&top_kv).await? - } - }, + }, Mode::Offline(ref config) => self .load_child_snapshot(&config.state_snapshot.path) .map_err(|why| { @@ -720,7 +749,7 @@ where } // Public methods -impl Builder { +impl Builder { /// Create a new builder. pub fn new() -> Self { Default::default() @@ -795,13 +824,7 @@ impl Builder { } self } -} -// Public methods -impl Builder -where - B::Header: DeserializeOwned, -{ /// Build the test externalities. pub async fn build(self) -> Result { let state_version = self.state_version; diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs new file mode 100644 index 0000000000000..37555de480d4c --- /dev/null +++ b/utils/frame/remote-externalities/src/rpc_api.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
WS RPC API for one off RPC calls to a substrate node. +// TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 + +use jsonrpsee::{ + core::client::ClientT, + rpc_params, + ws_client::{WsClient, WsClientBuilder}, +}; +use sp_runtime::{ + generic::SignedBlock, + traits::{Block as BlockT, Header as HeaderT}, +}; + +/// Get the header of the block identified by `at` +pub async fn get_header(from: S, at: Block::Hash) -> Result +where + Block: BlockT, + Block::Header: serde::de::DeserializeOwned, + S: AsRef, +{ + let client = build_client(from).await?; + + client + .request::("chain_getHeader", rpc_params!(at)) + .await + .map_err(|e| format!("chain_getHeader request failed: {:?}", e)) +} + +/// Get the finalized head +pub async fn get_finalized_head(from: S) -> Result +where + Block: BlockT, + S: AsRef, +{ + let client = build_client(from).await?; + + client + .request::("chain_getFinalizedHead", None) + .await + .map_err(|e| format!("chain_getFinalizedHead request failed: {:?}", e)) +} + +/// Get the signed block identified by `at`. +pub async fn get_block(from: S, at: Block::Hash) -> Result +where + S: AsRef, + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: HeaderT, +{ + let client = build_client(from).await?; + let signed_block = client + .request::>("chain_getBlock", rpc_params!(at)) + .await + .map_err(|e| format!("chain_getBlock request failed: {:?}", e))?; + + Ok(signed_block.block) +} + +/// Build a websocket client that connects to `from`. +async fn build_client>(from: S) -> Result { + WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(from.as_ref()) + .await + .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) +} + +/// Get the runtime version of a given chain. 
+pub async fn get_runtime_version( + from: S, + at: Option, +) -> Result +where + S: AsRef, + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: HeaderT, +{ + let client = build_client(from).await?; + client + .request::("state_getRuntimeVersion", rpc_params!(at)) + .await + .map_err(|e| format!("state_getRuntimeVersion request failed: {:?}", e)) +} diff --git a/utils/frame/rpc/client/Cargo.toml b/utils/frame/rpc/client/Cargo.toml deleted file mode 100644 index 80aa60f199f1f..0000000000000 --- a/utils/frame/rpc/client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "substrate-rpc-client" -version = "0.10.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Shared JSON-RPC client" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -jsonrpsee = { version = "0.15.1", features = ["ws-client"] } -sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } -async-trait = "0.1.57" -serde = "1" -sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } -log = "0.4" - -[dev-dependencies] -tokio = { version = "1.17.0", features = ["macros", "rt-multi-thread", "sync"] } -sp-core = { path = "../../../../primitives/core" } \ No newline at end of file diff --git a/utils/frame/rpc/client/src/lib.rs b/utils/frame/rpc/client/src/lib.rs deleted file mode 100644 index 254cc193c0e67..0000000000000 --- a/utils/frame/rpc/client/src/lib.rs +++ /dev/null @@ -1,265 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Shared JSON-RPC client related code and abstractions. -//! -//! It exposes a `WebSocket JSON-RPC` client that implements the RPC interface in [`sc-rpc-api`] -//! along with some abstractions. -//! -//! ## Usage -//! -//! ```no_run -//! # use substrate_rpc_client::{ws_client, StateApi}; -//! # use sp_core::{H256, storage::StorageKey}; -//! -//! #[tokio::main] -//! async fn main() { -//! -//! let client = ws_client("ws://127.0.0.1:9944").await.unwrap(); -//! client.storage(StorageKey(vec![]), Some(H256::zero())).await.unwrap(); -//! -//! // if all type params are not known you need to provide type params -//! StateApi::::storage(&client, StorageKey(vec![]), None).await.unwrap(); -//! } -//! ``` - -use async_trait::async_trait; -use serde::de::DeserializeOwned; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::collections::VecDeque; - -pub use jsonrpsee::{ - core::client::{ClientT, Subscription, SubscriptionClientT}, - rpc_params, - ws_client::{WsClient, WsClientBuilder}, -}; -pub use sc_rpc_api::{ - author::AuthorApiClient as AuthorApi, chain::ChainApiClient as ChainApi, - child_state::ChildStateApiClient as ChildStateApi, dev::DevApiClient as DevApi, - offchain::OffchainApiClient as OffchainApi, state::StateApiClient as StateApi, - system::SystemApiClient as SystemApi, -}; - -/// Create a new `WebSocket` connection with shared settings. 
-pub async fn ws_client(uri: impl AsRef) -> Result { - WsClientBuilder::default() - .max_request_body_size(u32::MAX) - .request_timeout(std::time::Duration::from_secs(60 * 10)) - .connection_timeout(std::time::Duration::from_secs(60)) - .max_notifs_per_subscription(1024) - .build(uri) - .await - .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) -} - -/// Abstraction over RPC calling for headers. -#[async_trait] -pub trait HeaderProvider -where - Block::Header: HeaderT, -{ - /// Awaits for the header of the block with hash `hash`. - async fn get_header(&self, hash: Block::Hash) -> Block::Header; -} - -#[async_trait] -impl HeaderProvider for WsClient -where - Block::Header: DeserializeOwned, -{ - async fn get_header(&self, hash: Block::Hash) -> Block::Header { - ChainApi::<(), Block::Hash, Block::Header, ()>::header(self, Some(hash)) - .await - .unwrap() - .unwrap() - } -} - -/// Abstraction over RPC subscription for finalized headers. -#[async_trait] -pub trait HeaderSubscription -where - Block::Header: HeaderT, -{ - /// Await for the next finalized header from the subscription. - /// - /// Returns `None` if either the subscription has been closed or there was an error when reading - /// an object from the client. - async fn next_header(&mut self) -> Option; -} - -#[async_trait] -impl HeaderSubscription for Subscription -where - Block::Header: DeserializeOwned, -{ - async fn next_header(&mut self) -> Option { - match self.next().await { - Some(Ok(header)) => Some(header), - None => { - log::warn!("subscription closed"); - None - }, - Some(Err(why)) => { - log::warn!("subscription returned error: {:?}. Probably decoding has failed.", why); - None - }, - } - } -} - -/// Stream of all finalized headers. -/// -/// Returned headers are guaranteed to be ordered. There are no missing headers (even if some of -/// them lack justification). 
-pub struct FinalizedHeaders< - 'a, - Block: BlockT, - HP: HeaderProvider, - HS: HeaderSubscription, -> { - header_provider: &'a HP, - subscription: HS, - fetched_headers: VecDeque, - last_returned: Option<::Hash>, -} - -impl<'a, Block: BlockT, HP: HeaderProvider, HS: HeaderSubscription> - FinalizedHeaders<'a, Block, HP, HS> -where - ::Header: DeserializeOwned, -{ - pub fn new(header_provider: &'a HP, subscription: HS) -> Self { - Self { - header_provider, - subscription, - fetched_headers: VecDeque::new(), - last_returned: None, - } - } - - /// Reads next finalized header from the subscription. If some headers (without justification) - /// have been skipped, fetches them as well. Returns number of headers that have been fetched. - /// - /// All fetched headers are stored in `self.fetched_headers`. - async fn fetch(&mut self) -> usize { - let last_finalized = match self.subscription.next_header().await { - Some(header) => header, - None => return 0, - }; - - self.fetched_headers.push_front(last_finalized.clone()); - - let mut last_finalized_parent = *last_finalized.parent_hash(); - let last_returned = self.last_returned.unwrap_or(last_finalized_parent); - - while last_finalized_parent != last_returned { - let parent_header = self.header_provider.get_header(last_finalized_parent).await; - self.fetched_headers.push_front(parent_header.clone()); - last_finalized_parent = *parent_header.parent_hash(); - } - - self.fetched_headers.len() - } - - /// Get the next finalized header. 
- pub async fn next(&mut self) -> Option { - if self.fetched_headers.is_empty() { - self.fetch().await; - } - - if let Some(header) = self.fetched_headers.pop_front() { - self.last_returned = Some(header.hash()); - Some(header) - } else { - None - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header, H256}; - use std::sync::Arc; - use tokio::sync::Mutex; - - type Block = TBlock>; - type BlockNumber = u64; - type Hash = H256; - - struct MockHeaderProvider(pub Arc>>); - - fn headers() -> Vec
{ - let mut headers = vec![Header::new_from_number(0)]; - for n in 1..11 { - headers.push(Header { - parent_hash: headers.last().unwrap().hash(), - ..Header::new_from_number(n) - }) - } - headers - } - - #[async_trait] - impl HeaderProvider for MockHeaderProvider { - async fn get_header(&self, _hash: Hash) -> Header { - let height = self.0.lock().await.pop_front().unwrap(); - headers()[height as usize].clone() - } - } - - struct MockHeaderSubscription(pub VecDeque); - - #[async_trait] - impl HeaderSubscription for MockHeaderSubscription { - async fn next_header(&mut self) -> Option
{ - self.0.pop_front().map(|h| headers()[h as usize].clone()) - } - } - - #[tokio::test] - async fn finalized_headers_works_when_every_block_comes_from_subscription() { - let heights = vec![4, 5, 6, 7]; - - let provider = MockHeaderProvider(Default::default()); - let subscription = MockHeaderSubscription(heights.clone().into()); - let mut headers = FinalizedHeaders::new(&provider, subscription); - - for h in heights { - assert_eq!(h, headers.next().await.unwrap().number); - } - assert_eq!(None, headers.next().await); - } - - #[tokio::test] - async fn finalized_headers_come_from_subscription_and_provider_if_in_need() { - let all_heights = 3..11; - let heights_in_subscription = vec![3, 4, 6, 10]; - // Consecutive headers will be requested in the reversed order. - let heights_not_in_subscription = vec![5, 9, 8, 7]; - - let provider = MockHeaderProvider(Arc::new(Mutex::new(heights_not_in_subscription.into()))); - let subscription = MockHeaderSubscription(heights_in_subscription.into()); - let mut headers = FinalizedHeaders::new(&provider, subscription); - - for h in all_heights { - assert_eq!(h, headers.next().await.unwrap().number); - } - assert_eq!(None, headers.next().await); - } -} diff --git a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index d45e502df276c..00fdc87a506e8 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -23,7 +23,7 @@ sp-io = { path = "../../../../primitives/io" } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } -trie-db = "0.24.0" +trie-db = { version = "0.23.1" } jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index ab180c7d45d5b..b6d403ff2fcfd 100644 
--- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -24,18 +24,15 @@ use jsonrpsee::{ }; use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::sync::Arc; use sp_core::{ storage::{ChildInfo, ChildType, PrefixedStorageKey}, Hasher, }; -use sp_state_machine::backend::AsTrieBackend; -use sp_trie::{ - trie_types::{TrieDB, TrieDBBuilder}, - KeySpacedDB, Trie, -}; +use sp_state_machine::Backend; +use sp_trie::{trie_types::TrieDB, KeySpacedDB, Trie}; use trie_db::{ node::{NodePlan, ValuePlan}, TrieDBNodeIterator, @@ -44,9 +41,9 @@ use trie_db::{ fn count_migrate<'a, H: Hasher>( storage: &'a dyn trie_db::HashDBRef>, root: &'a H::Out, -) -> std::result::Result<(u64, TrieDB<'a, 'a, H>), String> { +) -> std::result::Result<(u64, TrieDB<'a, H>), String> { let mut nb = 0u64; - let trie = TrieDBBuilder::new(storage, root).build(); + let trie = TrieDB::new(storage, root).map_err(|e| format!("TrieDB creation error: {}", e))?; let iter_node = TrieDBNodeIterator::new(&trie).map_err(|e| format!("TrieDB node iterator error: {}", e))?; for node in iter_node { @@ -71,9 +68,13 @@ pub fn migration_status(backend: &B) -> std::result::Result<(u64, u64), St where H: Hasher, H::Out: codec::Codec, - B: AsTrieBackend, + B: Backend, { - let trie_backend = backend.as_trie_backend(); + let trie_backend = if let Some(backend) = backend.as_trie_backend() { + backend + } else { + return Err("No access to trie from backend.".to_string()) + }; let essence = trie_backend.essence(); let (nb_to_migrate, trie) = count_migrate(essence, essence.root())?; @@ -144,8 +145,8 @@ where fn call(&self, at: Option<::Hash>) -> RpcResult { self.deny_unsafe.check_if_safe()?; - let hash = at.unwrap_or_else(|| self.client.info().best_hash); - let state = self.backend.state_at(hash).map_err(error_into_rpc_err)?; + let block_id = 
BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + let state = self.backend.state_at(block_id).map_err(error_into_rpc_err)?; let (top, child) = migration_status(&state).map_err(error_into_rpc_err)?; Ok(MigrationStatusResult { diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 38e40a33d9c7f..2104774bd2605 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -27,6 +27,4 @@ sp-storage = { version = "6.0.0", path = "../../../../primitives/storage" } scale-info = "2.1.1" jsonrpsee = { version = "0.15.1", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.17.0" -sp-core = { version = "6.0.0", path = "../../../../primitives/core" } -sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index fdf6fe0be8172..2ee007c84f0aa 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -34,104 +34,50 @@ use sp_storage::{StorageData, StorageKey}; /// # use jsonrpsee::core::Error as RpcError; /// # use jsonrpsee::ws_client::WsClientBuilder; /// # use codec::Encode; -/// # use frame_support::{construct_runtime, traits::ConstU32}; +/// # use frame_support::{decl_storage, decl_module}; /// # use substrate_frame_rpc_support::StorageQuery; +/// # use frame_system::Config; /// # use sc_rpc_api::state::StateApiClient; -/// # use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; /// # -/// # construct_runtime!( -/// # pub enum TestRuntime where -/// # Block = frame_system::mocking::MockBlock, -/// # NodeBlock = frame_system::mocking::MockBlock, -/// # UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic, -/// # { -/// # System: frame_system::{Pallet, Call, Config, Storage, Event}, -/// # Test: pallet_test::{Pallet, Storage}, -/// # } -/// # ); 
+/// # // Hash would normally be ::Hash, but we don't have +/// # // frame_system::Config implemented for TestRuntime. Here we just pretend. +/// # type Hash = (); /// # -/// # type Hash = sp_core::H256; /// # -/// # impl frame_system::Config for TestRuntime { -/// # type BaseCallFilter = (); -/// # type BlockWeights = (); -/// # type BlockLength = (); -/// # type RuntimeOrigin = RuntimeOrigin; -/// # type RuntimeCall = RuntimeCall; -/// # type Index = u64; -/// # type BlockNumber = u64; -/// # type Hash = Hash; -/// # type Hashing = BlakeTwo256; -/// # type AccountId = u64; -/// # type Lookup = IdentityLookup; -/// # type Header = Header; -/// # type RuntimeEvent = RuntimeEvent; -/// # type BlockHashCount = (); -/// # type DbWeight = (); -/// # type Version = (); -/// # type PalletInfo = PalletInfo; -/// # type AccountData = (); -/// # type OnNewAccount = (); -/// # type OnKilledAccount = (); -/// # type SystemWeightInfo = (); -/// # type SS58Prefix = (); -/// # type OnSetCode = (); -/// # type MaxConsumers = ConstU32<16>; -/// # } +/// # struct TestRuntime; /// # -/// # impl pallet_test::Config for TestRuntime {} +/// # decl_module! { +/// # pub struct Module for enum Call where origin: T::Origin {} +/// # } /// # -/// /// pub type Loc = (i64, i64, i64); /// pub type Block = u8; /// /// // Note that all fields are marked pub. 
-/// pub use self::pallet_test::*; -/// -/// #[frame_support::pallet] -/// mod pallet_test { -/// use super::*; -/// use frame_support::pallet_prelude::*; -/// -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(PhantomData); -/// -/// #[pallet::config] -/// pub trait Config: frame_system::Config {} -/// -/// #[pallet::storage] -/// pub type LastActionId = StorageValue<_, u64, ValueQuery>; -/// -/// #[pallet::storage] -/// pub type Voxels = StorageMap<_, Blake2_128Concat, Loc, Block>; -/// -/// #[pallet::storage] -/// pub type Actions = StorageMap<_, Blake2_128Concat, u64, Loc>; -/// -/// #[pallet::storage] -/// pub type Prefab = StorageDoubleMap< -/// _, -/// Blake2_128Concat, u128, -/// Blake2_128Concat, (i8, i8, i8), Block -/// >; +/// decl_storage! { +/// trait Store for Module as TestRuntime { +/// pub LastActionId: u64; +/// pub Voxels: map hasher(blake2_128_concat) Loc => Block; +/// pub Actions: map hasher(blake2_128_concat) u64 => Loc; +/// pub Prefab: double_map hasher(blake2_128_concat) u128, hasher(blake2_128_concat) (i8, i8, i8) => Block; +/// } /// } /// /// #[tokio::main] /// async fn main() -> Result<(), RpcError> { /// let cl = WsClientBuilder::default().build("ws://[::1]:9944").await?; /// -/// let q = StorageQuery::value::>(); +/// let q = StorageQuery::value::(); /// let hash = None::; /// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::map::, _>((0, 0, 0)); +/// let q = StorageQuery::map::((0, 0, 0)); /// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::map::, _>(12); +/// let q = StorageQuery::map::(12); /// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::double_map::, _, _>(3, (0, 0, 0)); +/// let q = StorageQuery::double_map::(3, (0, 0, 0)); /// let _: Option = q.get(&cl, hash).await?; /// /// Ok(()) diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 
c7191b7eb7f5f..4b4f9bdb2809a 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -13,12 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "3.1.18", features = ["derive"] } log = "0.4.17" parity-scale-codec = "3.0.0" serde = "1.0.136" zstd = { version = "0.11.2", default-features = false } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } +jsonrpsee = { version = "0.15.1", default-features = false, features = ["ws-client"] } sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" } @@ -30,14 +31,3 @@ sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-machine" } sp-version = { version = "5.0.0", path = "../../../../primitives/version" } -sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } -frame-try-runtime = { optional = true, path = "../../../../frame/try-runtime" } -substrate-rpc-client = { path = "../../rpc/client" } - -[dev-dependencies] -tokio = "1.17.0" - -[features] -try-runtime = [ - "frame-try-runtime/try-runtime", -] diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 56d88b9cb8919..204acd879312f 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -19,46 +19,33 @@ use crate::{ build_executor, ensure_matching_spec, extract_code, full_extensions, hash_of, local_spec, state_machine_call_with_proof, SharedParams, State, LOG_TARGET, 
}; -use parity_scale_codec::Encode; +use remote_externalities::rpc_api; use sc_service::{Configuration, NativeExecutionDispatch}; use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi}; /// Configurations of the [`Command::ExecuteBlock`]. -/// -/// This will always call into `TryRuntime_execute_block`, which can optionally skip the state-root -/// check (useful for trying a unreleased runtime), and can execute runtime sanity checks as well. #[derive(Debug, Clone, clap::Parser)] pub struct ExecuteBlockCmd { /// Overwrite the wasm code in state or not. - #[arg(long)] + #[clap(long)] overwrite_wasm_code: bool, - /// If set the state root check is disabled. - #[arg(long)] - no_state_root_check: bool, - - /// Which try-state targets to execute when running this command. - /// - /// Expected values: - /// - `all` - /// - `none` - /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g. - /// `Staking, System`). - /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a - /// round-robin fashion. - #[arg(long, default_value = "none")] - try_state: frame_try_runtime::TryStateSelect, + /// If set, then the state root check is disabled by the virtue of calling into + /// `TryRuntime_execute_block_no_check` instead of + /// `Core_execute_block`. + #[clap(long)] + no_check: bool, /// The block hash at which to fetch the block. /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::at` is. Only use this (with care) when combined with a snapshot. 
- #[arg( + #[clap( long, - value_parser = crate::parse::hash + multiple_values = false, + parse(try_from_str = crate::parse::hash) )] block_at: Option, @@ -66,9 +53,10 @@ pub struct ExecuteBlockCmd { /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::uri` is. Only use this (with care) when combined with a snapshot. - #[arg( + #[clap( long, - value_parser = crate::parse::url + multiple_values = false, + parse(try_from_str = crate::parse::url) )] block_ws_uri: Option, @@ -77,34 +65,22 @@ pub struct ExecuteBlockCmd { /// For this command only, if the `live` is used, then state of the parent block is fetched. /// /// If `block_at` is provided, then the [`State::Live::at`] is being ignored. - #[command(subcommand)] + #[clap(subcommand)] state: State, } impl ExecuteBlockCmd { - async fn block_at(&self, ws_uri: String) -> sc_cli::Result + fn block_at(&self) -> sc_cli::Result where - Block::Hash: FromStr + serde::de::DeserializeOwned, + Block::Hash: FromStr, ::Err: Debug, - Block::Header: serde::de::DeserializeOwned, { - let rpc = ws_client(&ws_uri).await?; - match (&self.block_at, &self.state) { (Some(block_at), State::Snap { .. }) => hash_of::(block_at), (Some(block_at), State::Live { .. }) => { log::warn!(target: LOG_TARGET, "--block-at is provided while state type is live. the `Live::at` will be ignored"); hash_of::(block_at) }, - (None, State::Live { at: None, .. }) => { - log::warn!( - target: LOG_TARGET, - "No --block-at or --at provided, using the latest finalized block instead" - ); - ChainApi::<(), Block::Hash, Block::Header, ()>::finalized_head(&rpc) - .await - .map_err(|e| e.to_string().into()) - }, (None, State::Live { at: Some(at), .. 
}) => hash_of::(at), _ => { panic!("either `--block-at` must be provided, or state must be `live with a proper `--at``"); @@ -140,8 +116,6 @@ where Block: BlockT + serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, - Block::Hash: serde::de::DeserializeOwned, - Block::Header: serde::de::DeserializeOwned, NumberFor: FromStr, as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, @@ -149,18 +123,13 @@ where let executor = build_executor::(&shared, &config); let execution = shared.execution; + let block_at = command.block_at::()?; let block_ws_uri = command.block_ws_uri::(); - let block_at = command.block_at::(block_ws_uri.clone()).await?; - let rpc = ws_client(&block_ws_uri).await?; - let block: Block = ChainApi::<(), Block::Hash, Block::Header, _>::block(&rpc, Some(block_at)) - .await - .unwrap() - .unwrap(); + let block: Block = rpc_api::get_block::(block_ws_uri.clone(), block_at).await?; let parent_hash = block.header().parent_hash(); log::info!( target: LOG_TARGET, - "fetched block #{:?} from {:?}, parent_hash to fetch the state {:?}", - block.header().number(), + "fetched block from {:?}, parent_hash to fetch the state {:?}", block_ws_uri, parent_hash ); @@ -170,8 +139,7 @@ where .state .builder::()? // make sure the state is being build with the parent hash, if it is online. 
- .overwrite_online_at(parent_hash.to_owned()) - .state_version(shared.state_version); + .overwrite_online_at(parent_hash.to_owned()); let builder = if command.overwrite_wasm_code { log::info!( @@ -193,7 +161,6 @@ where let (mut header, extrinsics) = block.deconstruct(); header.digest_mut().pop(); let block = Block::new(header, extrinsics); - let payload = (block.clone(), !command.no_state_root_check, command.try_state).encode(); let (expected_spec_name, expected_spec_version, _) = local_spec::(&ext, &executor); @@ -201,7 +168,7 @@ where block_ws_uri.clone(), expected_spec_name, expected_spec_version, - shared.no_spec_check_panic, + shared.no_spec_name_check, ) .await; @@ -209,8 +176,8 @@ where &ext, &executor, execution, - "TryRuntime_execute_block", - &payload, + if command.no_check { "TryRuntime_execute_block_no_check" } else { "Core_execute_block" }, + block.encode().as_ref(), full_extensions(), )?; diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 1cc371c8f22fd..e2e6bd7244945 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -19,15 +19,17 @@ use crate::{ build_executor, ensure_matching_spec, extract_code, full_extensions, local_spec, parse, state_machine_call_with_proof, SharedParams, LOG_TARGET, }; -use parity_scale_codec::{Decode, Encode}; -use remote_externalities::{Builder, Mode, OnlineConfig}; +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + ws_client::WsClientBuilder, +}; +use parity_scale_codec::Decode; +use remote_externalities::{rpc_api, Builder, Mode, OnlineConfig}; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; -use serde::{de::DeserializeOwned, Serialize}; use sp_core::H256; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use std::{fmt::Debug, 
str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi, FinalizedHeaders, Subscription, WsClient}; const SUB: &str = "chain_subscribeFinalizedHeads"; const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; @@ -36,45 +38,8 @@ const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; #[derive(Debug, Clone, clap::Parser)] pub struct FollowChainCmd { /// The url to connect to. - #[arg(short, long, value_parser = parse::url)] + #[clap(short, long, parse(try_from_str = parse::url))] uri: String, - - /// If set, then the state root check is enabled. - #[arg(long)] - state_root_check: bool, - - /// Which try-state targets to execute when running this command. - /// - /// Expected values: - /// - `all` - /// - `none` - /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g. - /// `Staking, System`). - /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a - /// round-robin fashion. - #[arg(long, default_value = "none")] - try_state: frame_try_runtime::TryStateSelect, - - /// If present, a single connection to a node will be kept and reused for fetching blocks. - #[arg(long)] - keep_connection: bool, -} - -/// Start listening for with `SUB` at `url`. -/// -/// Returns a pair `(client, subscription)` - `subscription` alone will be useless, because it -/// relies on the related alive `client`. -async fn start_subscribing( - url: &str, -) -> sc_cli::Result<(WsClient, Subscription
)> { - let client = ws_client(url).await.map_err(|e| sc_cli::Error::Application(e.into()))?; - - log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); - - let sub = ChainApi::<(), (), Header, ()>::subscribe_finalized_heads(&client) - .await - .map_err(|e| sc_cli::Error::Application(e.into()))?; - Ok((client, sub)) } pub(crate) async fn follow_chain( @@ -83,32 +48,49 @@ pub(crate) async fn follow_chain( config: Configuration, ) -> sc_cli::Result<()> where - Block: BlockT + DeserializeOwned, + Block: BlockT + serde::de::DeserializeOwned, Block::Hash: FromStr, - Block::Header: DeserializeOwned, + Block::Header: serde::de::DeserializeOwned, ::Err: Debug, NumberFor: FromStr, as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, { let mut maybe_state_ext = None; - let (rpc, subscription) = start_subscribing::(&command.uri).await?; + + let client = WsClientBuilder::default() + .connection_timeout(std::time::Duration::new(20, 0)) + .max_notifs_per_subscription(1024) + .max_request_body_size(u32::MAX) + .build(&command.uri) + .await + .unwrap(); + + log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); + let mut subscription: Subscription = + client.subscribe(SUB, None, UN_SUB).await.unwrap(); let (code_key, code) = extract_code(&config.chain_spec)?; let executor = build_executor::(&shared, &config); let execution = shared.execution; - let mut finalized_headers: FinalizedHeaders = - FinalizedHeaders::new(&rpc, subscription); + loop { + let header = match subscription.next().await { + Some(Ok(header)) => header, + None => { + log::warn!("subscription closed"); + break + }, + Some(Err(why)) => { + log::warn!("subscription returned error: {:?}. 
Probably decoding has failed.", why); + continue + }, + }; - while let Some(header) = finalized_headers.next().await { let hash = header.hash(); let number = header.number(); - let block: Block = ChainApi::<(), Block::Hash, Block::Header, _>::block(&rpc, Some(hash)) - .await - .unwrap() - .unwrap(); + let block = rpc_api::get_block::(&command.uri, hash).await.unwrap(); log::debug!( target: LOG_TARGET, @@ -120,13 +102,11 @@ where // create an ext at the state of this block, whatever is the first subscription event. if maybe_state_ext.is_none() { - let builder = Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: command.uri.clone().into(), - at: Some(*header.parent_hash()), - ..Default::default() - })) - .state_version(shared.state_version); + let builder = Builder::::new().mode(Mode::Online(OnlineConfig { + transport: command.uri.clone().into(), + at: Some(*header.parent_hash()), + ..Default::default() + })); let new_ext = builder .inject_hashed_key_value(&[(code_key.clone(), code.clone())]) @@ -145,7 +125,7 @@ where command.uri.clone(), expected_spec_name, expected_spec_version, - shared.no_spec_check_panic, + shared.no_spec_name_check, ) .await; @@ -159,13 +139,13 @@ where state_ext, &executor, execution, - "TryRuntime_execute_block", - (block, command.state_root_check, command.try_state.clone()).encode().as_ref(), + "TryRuntime_execute_block_no_check", + block.encode().as_ref(), full_extensions(), )?; - let consumed_weight = ::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode weight: {:?}", e))?; + let consumed_weight = ::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output: {:?}", e))?; let storage_changes = changes .drain_storage_changes( diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 8d2585372b4a8..50780f4513b2f 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ 
b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -20,27 +20,28 @@ use crate::{ parse, state_machine_call, SharedParams, State, LOG_TARGET, }; use parity_scale_codec::Encode; +use remote_externalities::rpc_api; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi}; /// Configurations of the [`Command::OffchainWorker`]. #[derive(Debug, Clone, clap::Parser)] pub struct OffchainWorkerCmd { /// Overwrite the wasm code in state or not. - #[arg(long)] + #[clap(long)] overwrite_wasm_code: bool, /// The block hash at which to fetch the header. /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::at` is. Only use this (with care) when combined with a snapshot. - #[arg( + #[clap( long, - value_parser = parse::hash + multiple_values = false, + parse(try_from_str = parse::hash) )] header_at: Option, @@ -48,14 +49,15 @@ pub struct OffchainWorkerCmd { /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::uri` is. Only use this (with care) when combined with a snapshot. - #[arg( + #[clap( long, - value_parser = parse::url + multiple_values = false, + parse(try_from_str = parse::url) )] header_ws_uri: Option, /// The state type to use. 
- #[command(subcommand)] + #[clap(subcommand)] pub state: State, } @@ -117,11 +119,7 @@ where let header_at = command.header_at::()?; let header_ws_uri = command.header_ws_uri::(); - let rpc = ws_client(&header_ws_uri).await?; - let header = ChainApi::<(), Block::Hash, Block::Header, ()>::header(&rpc, Some(header_at)) - .await - .unwrap() - .unwrap(); + let header = rpc_api::get_header::(header_ws_uri.clone(), header_at).await?; log::info!( target: LOG_TARGET, "fetched header from {:?}, block number: {:?}", @@ -130,7 +128,7 @@ where ); let ext = { - let builder = command.state.builder::()?.state_version(shared.state_version); + let builder = command.state.builder::()?; let builder = if command.overwrite_wasm_code { log::info!( @@ -153,7 +151,7 @@ where header_ws_uri, expected_spec_name, expected_spec_version, - shared.no_spec_check_panic, + shared.no_spec_name_check, ) .await; diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index fba34ddfb5060..616498da02497 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -21,7 +21,6 @@ use parity_scale_codec::Decode; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sp_weights::Weight; use crate::{ build_executor, ensure_matching_spec, extract_code, local_spec, state_machine_call_with_proof, @@ -32,7 +31,7 @@ use crate::{ #[derive(Debug, Clone, clap::Parser)] pub struct OnRuntimeUpgradeCmd { /// The state type to use. 
- #[command(subcommand)] + #[clap(subcommand)] pub state: State, } @@ -45,7 +44,6 @@ where Block: BlockT + serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, - Block::Header: serde::de::DeserializeOwned, NumberFor: FromStr, as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, @@ -54,7 +52,7 @@ where let execution = shared.execution; let ext = { - let builder = command.state.builder::()?.state_version(shared.state_version); + let builder = command.state.builder::()?; let (code_key, code) = extract_code(&config.chain_spec)?; builder.inject_hashed_key_value(&[(code_key, code)]).build().await? }; @@ -66,7 +64,7 @@ where uri, expected_spec_name, expected_spec_version, - shared.no_spec_check_panic, + shared.no_spec_name_check, ) .await; } @@ -80,15 +78,14 @@ where Default::default(), // we don't really need any extensions here. )?; - let (weight, total_weight) = <(Weight, Weight) as Decode>::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode weight: {:?}", e))?; + let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output: {:?}", e))?; log::info!( target: LOG_TARGET, - "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = ({} ps, {} byte), total weight = ({} ps, {} byte) ({:.2} %, {:.2} %).", - weight.ref_time(), weight.proof_size(), - total_weight.ref_time(), total_weight.proof_size(), - (weight.ref_time() as f64 / total_weight.ref_time().max(1) as f64) * 100.0, - (weight.proof_size() as f64 / total_weight.proof_size().max(1) as f64) * 100.0, + "TryRuntime_on_runtime_upgrade executed without errors. 
Consumed weight = {}, total weight = {} ({})", + weight, + total_weight, + weight as f64 / total_weight.max(1) as f64 ); Ok(()) diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index f54354342bf28..f77f92c625c9d 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -132,20 +132,20 @@ //! added, given the right flag: //! //! ```ignore -//! //! #[cfg(feature = try-runtime)] -//! fn pre_upgrade() -> Result, &'static str> {} +//! fn pre_upgrade() -> Result<(), &'static str> {} //! //! #[cfg(feature = try-runtime)] -//! fn post_upgrade(state: Vec) -> Result<(), &'static str> {} +//! fn post_upgrade() -> Result<(), &'static str> {} //! ``` //! //! (The pallet macro syntax will support this simply as a part of `#[pallet::hooks]`). //! //! These hooks allow you to execute some code, only within the `on-runtime-upgrade` command, before -//! and after the migration. Moreover, `pre_upgrade` can return a `Vec` that contains arbitrary -//! encoded data (usually some pre-upgrade state) which will be passed to `post_upgrade` after -//! upgrading and used for post checking. +//! and after the migration. If any data needs to be temporarily stored between the pre/post +//! migration hooks, `OnRuntimeUpgradeHelpersExt` can help with that. Note that you should be +//! mindful with any mutable storage ops in the pre/post migration checks, as you almost certainly +//! will not want to mutate any of the storage that is to be migrated. //! //! #### Logging //! @@ -265,8 +265,6 @@ //! -s snap \ //! 
``` -#![cfg(feature = "try-runtime")] - use parity_scale_codec::Decode; use remote_externalities::{ Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, TestExternalities, @@ -295,10 +293,8 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor}, DeserializeOwned, }; -use sp_state_machine::{OverlayedChanges, StateMachine, TrieBackendBuilder}; -use sp_version::StateVersion; +use sp_state_machine::{InMemoryProvingBackend, OverlayedChanges, StateMachine}; use std::{fmt::Debug, path::PathBuf, str::FromStr}; -use substrate_rpc_client::{ws_client, StateApi}; mod commands; pub(crate) mod parse; @@ -338,18 +334,17 @@ pub enum Command { /// different state transition function. /// /// To make testing slightly more dynamic, you can disable the state root check by enabling - /// `ExecuteBlockCmd::no_check`. If you get signature verification errors, you should manually - /// tweak your local runtime's spec version to fix this. + /// `ExecuteBlockCmd::no_check`. If you get signature verification errors, you should + /// manually tweak your local runtime's spec version to fix this. /// /// A subtle detail of execute block is that if you want to execute block 100 of a live chain /// again, you need to scrape the state of block 99. This is already done automatically if you /// use [`State::Live`], and the parent hash of the target block is used to scrape the state. /// If [`State::Snap`] is being used, then this needs to be manually taken into consideration. /// - /// This does not execute the same runtime api as normal block import do, namely - /// `Core_execute_block`. Instead, it uses `TryRuntime_execute_block`, which can optionally - /// skip state-root check (useful for trying a unreleased runtime), and can execute runtime - /// sanity checks as well. + /// This executes the same runtime api as normal block import, namely `Core_execute_block`. 
If + /// `ExecuteBlockCmd::no_check` is set, it uses a custom, try-runtime-only runtime + /// api called `TryRuntime_execute_block_no_check`. ExecuteBlock(commands::execute_block::ExecuteBlockCmd), /// Executes *the offchain worker hooks* of a given block against some state. @@ -387,7 +382,6 @@ pub enum Command { /// Shared parameters of the `try-runtime` commands #[derive(Debug, Clone, clap::Parser)] -#[group(skip)] pub struct SharedParams { /// Shared parameters of substrate cli. #[allow(missing_docs)] @@ -395,42 +389,38 @@ pub struct SharedParams { pub shared_params: sc_cli::SharedParams, /// The execution strategy that should be used. - #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true, default_value_t = ExecutionStrategy::Wasm)] + #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true, default_value = "wasm")] pub execution: ExecutionStrategy, /// Type of wasm execution used. - #[arg( + #[clap( long = "wasm-execution", value_name = "METHOD", - value_enum, + possible_values = WasmExecutionMethod::variants(), ignore_case = true, - default_value_t = DEFAULT_WASM_EXECUTION_METHOD, + default_value = DEFAULT_WASM_EXECUTION_METHOD, )] pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. /// /// Only has an effect when `wasm-execution` is set to `compiled`. - #[arg( + #[clap( long = "wasm-instantiation-strategy", value_name = "STRATEGY", default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - value_enum, + arg_enum, )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// The number of 64KB pages to allocate for Wasm execution. Defaults to /// [`sc_service::Configuration.default_heap_pages`]. - #[arg(long)] + #[clap(long)] pub heap_pages: Option, - /// When enabled, the spec check will not panic, and instead only show a warning. - #[arg(long)] - pub no_spec_check_panic: bool, - - /// State version that is used by the chain. 
- #[arg(long, default_value_t = StateVersion::V1, value_parser = parse::state_version)] - pub state_version: StateVersion, + /// When enabled, the spec name check will not panic, and instead only show a warning. + #[clap(long)] + pub no_spec_name_check: bool, } /// Our `try-runtime` command. @@ -441,7 +431,7 @@ pub struct TryRuntimeCmd { #[clap(flatten)] pub shared: SharedParams, - #[command(subcommand)] + #[clap(subcommand)] pub command: Command, } @@ -452,17 +442,17 @@ pub enum State { /// /// This can be crated by passing a value to [`State::Live::snapshot_path`]. Snap { - #[arg(short, long)] + #[clap(short, long)] snapshot_path: PathBuf, }, /// Use a live chain as the source of runtime state. Live { /// The url to connect to. - #[arg( + #[clap( short, long, - value_parser = parse::url, + parse(try_from_str = parse::url), )] uri: String, @@ -470,20 +460,21 @@ pub enum State { /// /// If non provided, then the latest finalized head is used. This is particularly useful /// for [`Command::OnRuntimeUpgrade`]. - #[arg( + #[clap( short, long, - value_parser = parse::hash, + multiple_values = false, + parse(try_from_str = parse::hash), )] at: Option, /// An optional state snapshot file to WRITE to. Not written if set to `None`. - #[arg(short, long)] + #[clap(short, long)] snapshot_path: Option, /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will /// be scraped. - #[arg(short, long, num_args = 1..)] + #[clap(short, long, multiple_values = true)] pallet: Vec, /// Fetch the child-keys as well. @@ -491,7 +482,7 @@ pub enum State { /// Default is `false`, if specific `--pallets` are specified, `true` otherwise. In other /// words, if you scrape the whole state the child tree data is included out of the box. /// Otherwise, it must be enabled explicitly using this flag. 
- #[arg(long)] + #[clap(long)] child_tree: bool, }, } @@ -544,8 +535,8 @@ impl State { impl TryRuntimeCmd { pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> where - Block: BlockT + DeserializeOwned, - Block::Header: DeserializeOwned, + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, NumberFor: FromStr, @@ -629,14 +620,13 @@ where /// /// If the spec names don't match, if `relaxed`, then it emits a warning, else it panics. /// If the spec versions don't match, it only ever emits a warning. -pub(crate) async fn ensure_matching_spec( +pub(crate) async fn ensure_matching_spec( uri: String, expected_spec_name: String, expected_spec_version: u32, relaxed: bool, ) { - let rpc = ws_client(&uri).await.unwrap(); - match StateApi::::runtime_version(&rpc, None) + match remote_externalities::rpc_api::get_runtime_version::(uri.clone(), None) .await .map(|version| (String::from(version.spec_name.clone()), version.spec_version)) .map(|(spec_name, spec_version)| (spec_name.to_lowercase(), spec_version)) @@ -661,27 +651,21 @@ pub(crate) async fn ensure_matching_spec( if expected_spec_version == version { log::info!(target: LOG_TARGET, "found matching spec version: {:?}", version); } else { - let msg = format!( + log::warn!( + target: LOG_TARGET, "spec version mismatch (local {} != remote {}). This could cause some issues.", - expected_spec_version, version + expected_spec_version, + version ); - if relaxed { - log::warn!(target: LOG_TARGET, "{}", msg); - } else { - panic!("{}", msg); - } } }, Err(why) => { - let msg = format!( + log::error!( + target: LOG_TARGET, "failed to fetch runtime version from {}: {:?}. 
Skipping the check", - uri, why + uri, + why ); - if relaxed { - log::error!(target: LOG_TARGET, "{}", msg); - } else { - panic!("{}", msg); - } }, } } @@ -762,11 +746,9 @@ pub(crate) fn state_machine_call_with_proof(Into::into)?; - let proof = proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + let proof = proving_backend.extract_proof(); let proof_size = proof.encoded_size(); let compact_proof = proof .clone() @@ -812,15 +792,15 @@ pub(crate) fn state_machine_call_with_proof>()), proof_nodes.len() ); - log::debug!(target: LOG_TARGET, "proof size: {}", humanize(proof_size)); - log::debug!(target: LOG_TARGET, "compact proof size: {}", humanize(compact_proof_size),); - log::debug!( + log::info!(target: LOG_TARGET, "proof size: {}", humanize(proof_size)); + log::info!(target: LOG_TARGET, "compact proof size: {}", humanize(compact_proof_size),); + log::info!( target: LOG_TARGET, "zstd-compressed compact proof {}", humanize(compressed_proof.len()), diff --git a/utils/frame/try-runtime/cli/src/parse.rs b/utils/frame/try-runtime/cli/src/parse.rs index 257a99566979f..15a0251ebc34a 100644 --- a/utils/frame/try-runtime/cli/src/parse.rs +++ b/utils/frame/try-runtime/cli/src/parse.rs @@ -17,8 +17,6 @@ //! 
Utils for parsing user input -use sp_version::StateVersion; - pub(crate) fn hash(block_hash: &str) -> Result { let (block_hash, offset) = if let Some(block_hash) = block_hash.strip_prefix("0x") { (block_hash, 2) @@ -44,10 +42,3 @@ pub(crate) fn url(s: &str) -> Result { Err("not a valid WS(S) url: must start with 'ws://' or 'wss://'") } } - -pub(crate) fn state_version(s: &str) -> Result { - s.parse::() - .map_err(|_| ()) - .and_then(StateVersion::try_from) - .map_err(|_| "Invalid state version.") -} diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 46c5929969fbb..ac0ba5dbdb85a 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -20,6 +20,6 @@ strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.2" +wasm-gc-api = "0.1.11" sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } filetime = "0.2.16" -wasm-opt = "0.110" \ No newline at end of file diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 07219676413fc..197c1d1b220bb 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -656,39 +656,50 @@ fn compact_wasm_file( project: &Path, profile: Profile, cargo_manifest: &Path, - out_name: Option, + wasm_binary_name: Option, ) -> (Option, Option, WasmBinaryBloaty) { - let default_out_name = get_wasm_binary_name(cargo_manifest); - let out_name = out_name.unwrap_or_else(|| default_out_name.clone()); - let in_path = project + let default_wasm_binary_name = get_wasm_binary_name(cargo_manifest); + let wasm_file = project .join("target/wasm32-unknown-unknown") .join(profile.directory()) - .join(format!("{}.wasm", default_out_name)); - - let (wasm_compact_path, wasm_compact_compressed_path) = if profile.wants_compact() { - let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name,)); - 
wasm_opt::OptimizationOptions::new_opt_level_0() - .mvp_features_only() - .debug_info(true) - .add_pass(wasm_opt::Pass::StripDwarf) - .run(&in_path, &wasm_compact_path) + .join(format!("{}.wasm", default_wasm_binary_name)); + + let wasm_compact_file = if profile.wants_compact() { + let wasm_compact_file = project.join(format!( + "{}.compact.wasm", + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()), + )); + wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) .expect("Failed to compact generated WASM binary."); + Some(WasmBinary(wasm_compact_file)) + } else { + None + }; + + let wasm_compact_compressed_file = wasm_compact_file.as_ref().and_then(|compact_binary| { + let file_name = + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()); - let wasm_compact_compressed_path = - project.join(format!("{}.compact.compressed.wasm", out_name)); - if compress_wasm(&wasm_compact_path, &wasm_compact_compressed_path) { - (Some(WasmBinary(wasm_compact_path)), Some(WasmBinary(wasm_compact_compressed_path))) + let wasm_compact_compressed_file = + project.join(format!("{}.compact.compressed.wasm", file_name)); + + if compress_wasm(&compact_binary.0, &wasm_compact_compressed_file) { + Some(WasmBinary(wasm_compact_compressed_file)) } else { - (Some(WasmBinary(wasm_compact_path)), None) + None } + }); + + let bloaty_file_name = if let Some(name) = wasm_binary_name { + format!("{}.wasm", name) } else { - (None, None) + format!("{}.wasm", default_wasm_binary_name) }; - let bloaty_path = project.join(format!("{}.wasm", out_name)); - fs::copy(in_path, &bloaty_path).expect("Copying the bloaty file to the project dir."); + let bloaty_file = project.join(bloaty_file_name); + fs::copy(wasm_file, &bloaty_file).expect("Copying the bloaty file to the project dir."); - (wasm_compact_path, wasm_compact_compressed_path, WasmBinaryBloaty(bloaty_path)) + (wasm_compact_file, wasm_compact_compressed_file, WasmBinaryBloaty(bloaty_file)) } fn 
compress_wasm(wasm_binary_path: &Path, compressed_binary_out_path: &Path) -> bool {